diff --git a/.github/ISSUE_TEMPLATE/deployment_bug_report.md b/.github/ISSUE_TEMPLATE/deployment_bug_report.md
index 6cf453cc13..fa569038c2 100644
--- a/.github/ISSUE_TEMPLATE/deployment_bug_report.md
+++ b/.github/ISSUE_TEMPLATE/deployment_bug_report.md
@@ -29,7 +29,6 @@ Before reporting a problem please check the following. Replace the empty checkbo
List of packages
diff --git a/.github/ISSUE_TEMPLATE/release_checklist.md b/.github/ISSUE_TEMPLATE/release_checklist.md
index a25064faa1..575f5c9c53 100644
--- a/.github/ISSUE_TEMPLATE/release_checklist.md
+++ b/.github/ISSUE_TEMPLATE/release_checklist.md
@@ -14,9 +14,9 @@ Before reporting a problem please check the following. Replace the empty checkbo
Refer to the [Deployment](https://data-safe-haven.readthedocs.io/en/latest/deployment) section of our documentation when completing these steps.
-- [ ] Consult the `data-safe-haven/VERSIONING.md` guide and determine the version number of the new release. Record it in the title of this issue.
+- [ ] Consult the `data-safe-haven/VERSIONING.md` guide and determine the version number of the new release. Record it in the title of this issue
- [ ] Create a release branch called e.g. `release-v0.0.1`
-- [ ] Draft a changelog for the release similar to our previous releases, see https://github.com/alan-turing-institute/data-safe-haven/releases
+- [ ] Draft a changelog for the release similar to our [previous releases](https://github.com/alan-turing-institute/data-safe-haven/releases)
### For patch releases only
@@ -34,26 +34,27 @@ Refer to the [Deployment](https://data-safe-haven.readthedocs.io/en/latest/deplo
### For major releases only
- [ ] Confirm that a third party has carried out a full penetration test evaluating:
- 1. external attack surface
- 1. ability to exfiltrate data from the system
- 1. ability to transfer data between SREs
- 1. ability to escalate privileges on the SRD.
+ 1. external attack surface
+ 1. ability to exfiltrate data from the system
+ 1. ability to transfer data between SREs
+ 1. ability to escalate privileges on the SRD.
### Update documentation
- [ ] Update supported versions in `SECURITY.md`
- [ ] Update pen test results in `VERSIONING.md`
-## :computer: Release information
+### Making the release
-- **Version number:** _
-- **SHM ID:** _
-- **T2 SRE ID:** _
-- **T3 SRE ID:** _
+- [ ] Merge release branch into `latest`
+- [ ] Create a tag of the form `v0.0.1` pointing to the most recent commit on `latest` (the merge that you just made)
+- [ ] Publish your draft GitHub release using this tag
+- [ ] Ensure docs for the latest version are built and deployed on ReadTheDocs
+- [ ] Push a build to PyPI
+- [ ] Announce release on communications channels
## :deciduous_tree: Deployment problems
-
diff --git a/.github/scripts/update_azure_data_studio.py b/.github/scripts/update_azure_data_studio.py
deleted file mode 100644
index 651e85fdfc..0000000000
--- a/.github/scripts/update_azure_data_studio.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#! /usr/bin/env python3
-from lxml import html
-import hashlib
-import requests
-
-remote_page = requests.get("https://docs.microsoft.com/en-us/sql/azure-data-studio/download-azure-data-studio", allow_redirects=True)
-root = html.fromstring(remote_page.content)
-short_link = root.xpath("//a[contains(text(), '.deb')]/@href")[0]
-
-remote_content = requests.get(short_link, allow_redirects=True)
-sha256 = hashlib.sha256(remote_content.content).hexdigest()
-version = remote_content.url.split("-")[-1].replace(".deb", "")
-remote = "/".join(remote_content.url.split("/")[:-1] + ["|DEBFILE|"])
-
-with open("deployment/secure_research_desktop/packages/deb-azuredatastudio.version", "w") as f_out:
- f_out.write(f"hash: {sha256}\n")
- f_out.write(f"version: {version}\n")
- f_out.write("debfile: azuredatastudio-linux-|VERSION|.deb\n")
- f_out.write(f"remote: {remote}\n")
diff --git a/.github/scripts/update_dbeaver_drivers.py b/.github/scripts/update_dbeaver_drivers.py
deleted file mode 100644
index 696a501858..0000000000
--- a/.github/scripts/update_dbeaver_drivers.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#! /usr/bin/env python3
-import json
-from lxml import html
-from natsort import natsorted
-import requests
-
-
-def get_latest_version(url, search_text):
- """
- Get latest version number of a database driver from the Maven repository.
-
- Fetches the HTML page at the given URL, then converts it to an lxml tree.
- Numeric strings are then extracted.
- Note that mostly numeric strings for some drivers contain non-numeric text,
- as different driver types exist for those drivers, even where the version number is the same.
- The largest (latest) version number of the driver is then returned.
-
- Parameters
- ----------
- url : str
- The URL of the Maven repository containing the driver
- search_text : str
- Text to search for in the repository, to distinguish the driver from other files
-
- Returns
- -------
- list
- The latest available version number of the driver
- """
-
- remote_page = requests.get(url, allow_redirects=True)
- root = html.fromstring(remote_page.content)
- return natsorted([v for v in root.xpath("//a[contains(text(), '" + search_text + "')]/@href") if v != "../"])[-1].replace("/", "")
-
-
-drivers = [
- {
- 'name': "mssql_jdbc",
- 'url': "https://repo1.maven.org/maven2/com/microsoft/sqlserver/mssql-jdbc/",
- 'search_text': "jre8/"
- },
- {
- 'name': "pgjdbc",
- 'url': "https://repo1.maven.org/maven2/org/postgresql/pgjdbc-versions/",
- 'search_text': "/"
- },
- {
- 'name': "postgresql",
- 'url': "https://repo1.maven.org/maven2/org/postgresql/postgresql/",
- 'search_text': "/"
- },
- {
- 'name': "postgis_geometry",
- 'url': "https://repo1.maven.org/maven2/net/postgis/postgis-geometry/",
- 'search_text': "/"
- },
- {
- 'name': "postgis_jdbc",
- 'url': "https://repo1.maven.org/maven2/net/postgis/postgis-jdbc/",
- 'search_text': "/"
- },
- {
- 'name': "waffle_jna",
- 'url': "https://repo1.maven.org/maven2/com/github/waffle/waffle-jna/",
- 'search_text': "/"
- }
-]
-
-output = {driver['name']: get_latest_version(driver['url'], driver['search_text']) for driver in drivers}
-
-with open("deployment/secure_research_desktop/packages/dbeaver-driver-versions.json", "w") as f_out:
- f_out.writelines(json.dumps(output, indent=4, sort_keys=True))
diff --git a/.github/scripts/update_rstudio.py b/.github/scripts/update_rstudio.py
deleted file mode 100644
index ee36a35e66..0000000000
--- a/.github/scripts/update_rstudio.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#! /usr/bin/env python3
-from lxml import html
-import hashlib
-import requests
-
-remote_page = requests.get("https://www.rstudio.com/products/rstudio/download/", allow_redirects=True)
-root = html.fromstring(remote_page.content)
-short_links = [link for link in root.xpath("//a[contains(text(), '.deb')]/@href") if "debian" not in link]
-
-for ubuntu_version in ["focal", "jammy"]:
- short_link = [link for link in short_links if ubuntu_version in link][0]
- remote_content = requests.get(short_link, allow_redirects=True)
- sha256 = hashlib.sha256(remote_content.content).hexdigest()
- version = "-".join(remote_content.url.split("/")[-1].split("-")[1:-1])
- remote = "/".join(remote_content.url.split("/")[:-1] + ["|DEBFILE|"])
-
- with open(f"deployment/secure_research_desktop/packages/deb-rstudio-{ubuntu_version}.version", "w") as f_out:
- f_out.write(f"hash: {sha256}\n")
- f_out.write(f"version: {version}\n")
- f_out.write("debfile: rstudio-|VERSION|-amd64.deb\n")
- f_out.write(f"remote: {remote}\n")
diff --git a/.github/security_checklist_template.md b/.github/security_checklist_template.md
deleted file mode 100644
index b963331eef..0000000000
--- a/.github/security_checklist_template.md
+++ /dev/null
@@ -1,167 +0,0 @@
-# Security checklist
-Running on SHM/SREs deployed using commit XXXXXXX
-
-## Summary
-+ :white_check_mark: N tests passed
-- :partly_sunny: N tests partially passed (see below for more details)
-- :fast_forward: N tests skipped (see below for more details)
-- :x: N tests failed (see below for more details)
-
-## Details
-Some security checks were skipped since:
-- No managed device was available
-- No access to a physical space with its own dedicated network was possible
-
-### Multifactor Authentication and Password strength
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Check that the SRE standard user cannot access the apps
- + :camera: Verify before adding to group: Microsoft Remote Desktop: Login works but apps cannot be viewed
-
-
- + :camera: Verify before adding to group: Guacamole: User is prompted to setup MFA
-
-
-
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Check that adding the **SRE standard user** to the SRE group on the domain controller does not give them access
- + :camera: Verify after adding to group: Microsoft Remote Desktop: Login works and apps can be viewed
-
-
- + :camera: Verify after adding to group: Microsoft Remote Desktop: attempt to login to DSVM Main (Desktop) fails
-
-
- + :camera: Verify before adding to group: Guacamole: User is prompted to setup MFA
-
-
-
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Check that the **SRE standard user** is able to successfully set up MFA
- + :camera: Verify: successfully set up MFA
-
-
-
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Check that the **SRE standard user** can authenticate with MFA
- + :camera: Verify: Guacamole: respond to the MFA prompt
-
- + :camera: Verify: Microsoft Remote Desktop: attempt to log in to DSVM Main (Desktop) and respond to the MFA prompt
-
-
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Check that the **SRE standard user** can access the DSVM desktop
- + :camera: Verify: Microsoft Remote Desktop: connect to DSVM Main (Desktop)
-
-
- + :camera: Verify: Guacamole: connect to Desktop: Ubuntu0
-
-
-
-### Isolated Network
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Connect to the SHM DC and NPS if connected to the SHM VPN
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Fail to connect to the SHM DC and NPS if not connected to the SHM VPN
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Fail to connect to the internet from within a DSVM on the SRE network.
- + :camera: Verify: Connection fails
-
- + :camera: Verify: that you cannot access a website using curl
-
-
- + :camera: Verify: that you cannot get the IP address for a website using nslookup
-
-
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Check that users cannot connect between two SREs within the same SHM, even if they have access to both SREs
- + :camera: Verify: SSH connection fails
-
-
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Network rules are set appropriately to block outgoing traffic
- + :camera: Verify: access rules
-
-
-
-### User devices
-#### Tier 2:
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Connection succeeds from a personal device with an allow-listed IP address
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: No managed device available to check connection
-
-#### Tier 3:
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: No managed device available to check user lacks root access
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Connection succeeds from a personal device with an allow-listed IP address
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: No managed device available to check connection with an allow-listed IP address
-
-#### Tiers 2+:
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Network rules permit access only from allow-listed IP addresses
- + :camera: Verify: access rules
-
-
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: All non-deployment NSGs have rules denying inbound connections from outside the Virtual Network
-
-### Physical security
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: No secure physical space available so connection from outside was not tested
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: No secure physical space available so connection from inside was not tested
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Check the network IP ranges corresponding to the research spaces and compare against the IPs accepted by the firewall.
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: No secure physical space available so confirmation of physical measures was not tested
-
-### Remote connections
-
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Unable to connect as a user to the remote desktop server via SSH
- + :camera: Verify: SSH connection by FQDN fails
-
-
- + :camera: Verify: SSH connection by public IP address fails
-
-
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: The remote desktop server is the only SRE resource with a public IP address
-
-### Copy-and-paste
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Unable to paste local text into a DSVM
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Unable to copy text from a DSVM
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Copy between VMs in an SRE succeeds
-
-### Data ingress
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **System administrator:** secure upload token successfully created with write-only permissions
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **System administrator:** token was sent using a secure, out-of-band communication channel (e.g. secure email)
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **Data Provider:** uploading a file from an allow-listed IP address succeeds
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **Data Provider:** downloading a file from an allow-listed IP address fails
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **Data Provider:** uploading a file from an non-allowed IP address fails
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **Data Provider:** connection during lifetime of short-duration token succeeds
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **Data Provider:** connection after lifetime of short-duration token fails
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **Data Provider:** uploading different file types succeeds
-
-### Storage volumes and egress
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **SRE standard user** can read and write to the `/output` volume
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **SRE standard user** can only read from the `/data` volume
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **SRE standard user** can read and write to their directory in `/home`
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **SRE standard user** can read and write to the `/shared` volume
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **System administrator:** can see the files ready for egress
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **System administrator:** can download egress-ready files
-
-### Software Ingress
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **SRE standard user** expected software tools are installed
- + :camera: Verify: DBeaver, RStudio, PyCharm and Visual Studio Code available
-
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **System administrator:** secure upload token successfully created with write-only permissions
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **Data Provider:** uploading is possible only during the token lifetime
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **System administrator:** uploaded files are readable and can be installed on the DSVM
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **SRE standard user** uploaded files are readable but cannot be installed on the DSVM
-
-### Package mirrors
-
-#### Tier 2:
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Can install any packages
- + :camera: Verify: botocore can be installed
-
-
-
-#### Tier 3:
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Can install only allow-listed packages
- + :camera: Verify: aero-calc can be installed; botocore cannot be installed
-
-
-
-### Azure firewalls
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Admin has limited access to the internet
- + :camera: Verify: SHM DC cannot connect to google
-
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Admin can download Windows updates
- + :camera: Verify: Windows updates can be downloaded
-
diff --git a/.github/workflows/lint_code.yaml b/.github/workflows/lint_code.yaml
index 7786fc4b62..4d0caed16c 100644
--- a/.github/workflows/lint_code.yaml
+++ b/.github/workflows/lint_code.yaml
@@ -108,7 +108,7 @@ jobs:
done
rm expanded.tmp
- name: Lint YAML
- uses: karancode/yamllint-github-action@v2.1.1
+ uses: karancode/yamllint-github-action@v3.0.0
with:
yamllint_strict: true
yamllint_comment: false
diff --git a/.hatch/requirements-docs.txt b/.hatch/requirements-docs.txt
index cebd1e3c16..d8350ffbe4 100644
--- a/.hatch/requirements-docs.txt
+++ b/.hatch/requirements-docs.txt
@@ -1,17 +1,125 @@
#
# This file is autogenerated by hatch-pip-compile with Python 3.12
#
+# - appdirs==1.4.4
+# - azure-core==1.32.0
+# - azure-identity==1.19.0
+# - azure-keyvault-certificates==4.9.0
+# - azure-keyvault-keys==4.10.0
+# - azure-keyvault-secrets==4.9.0
+# - azure-mgmt-compute==33.0.0
+# - azure-mgmt-containerinstance==10.1.0
+# - azure-mgmt-dns==8.2.0
+# - azure-mgmt-keyvault==10.3.1
+# - azure-mgmt-msi==7.0.0
+# - azure-mgmt-rdbms==10.1.0
+# - azure-mgmt-resource==23.2.0
+# - azure-mgmt-storage==21.2.1
+# - azure-storage-blob==12.24.0
+# - azure-storage-file-datalake==12.18.0
+# - azure-storage-file-share==12.20.0
+# - chevron==0.14.0
+# - cryptography==44.0.0
+# - fqdn==1.5.1
+# - psycopg[binary]==3.1.19
+# - pulumi-azure-native==2.74.0
+# - pulumi-azuread==6.0.1
+# - pulumi-random==4.16.7
+# - pulumi==3.142.0
+# - pydantic==2.10.2
+# - pyjwt[crypto]==2.10.1
+# - pytz==2024.2
+# - pyyaml==6.0.2
+# - rich==13.9.4
+# - simple-acme-dns==3.2.0
+# - typer==0.14.0
+# - websocket-client==1.8.0
# - emoji==2.14.0
# - myst-parser==4.0.0
# - pydata-sphinx-theme==0.16.0
# - sphinx-togglebutton==0.3.2
# - sphinx==8.1.3
+# - sphinxcontrib-typer==0.5.1
#
accessible-pygments==0.0.5
# via pydata-sphinx-theme
+acme==2.10.0
+ # via simple-acme-dns
alabaster==1.0.0
# via sphinx
+annotated-types==0.7.0
+ # via pydantic
+appdirs==1.4.4
+ # via hatch.envs.docs
+arpeggio==2.0.2
+ # via parver
+attrs==24.2.0
+ # via parver
+azure-common==1.1.28
+ # via
+ # azure-mgmt-compute
+ # azure-mgmt-containerinstance
+ # azure-mgmt-dns
+ # azure-mgmt-keyvault
+ # azure-mgmt-msi
+ # azure-mgmt-rdbms
+ # azure-mgmt-resource
+ # azure-mgmt-storage
+azure-core==1.32.0
+ # via
+ # hatch.envs.docs
+ # azure-identity
+ # azure-keyvault-certificates
+ # azure-keyvault-keys
+ # azure-keyvault-secrets
+ # azure-mgmt-core
+ # azure-storage-blob
+ # azure-storage-file-datalake
+ # azure-storage-file-share
+ # msrest
+azure-identity==1.19.0
+ # via hatch.envs.docs
+azure-keyvault-certificates==4.9.0
+ # via hatch.envs.docs
+azure-keyvault-keys==4.10.0
+ # via hatch.envs.docs
+azure-keyvault-secrets==4.9.0
+ # via hatch.envs.docs
+azure-mgmt-compute==33.0.0
+ # via hatch.envs.docs
+azure-mgmt-containerinstance==10.1.0
+ # via hatch.envs.docs
+azure-mgmt-core==1.5.0
+ # via
+ # azure-mgmt-compute
+ # azure-mgmt-containerinstance
+ # azure-mgmt-dns
+ # azure-mgmt-keyvault
+ # azure-mgmt-msi
+ # azure-mgmt-rdbms
+ # azure-mgmt-resource
+ # azure-mgmt-storage
+azure-mgmt-dns==8.2.0
+ # via hatch.envs.docs
+azure-mgmt-keyvault==10.3.1
+ # via hatch.envs.docs
+azure-mgmt-msi==7.0.0
+ # via hatch.envs.docs
+azure-mgmt-rdbms==10.1.0
+ # via hatch.envs.docs
+azure-mgmt-resource==23.2.0
+ # via hatch.envs.docs
+azure-mgmt-storage==21.2.1
+ # via hatch.envs.docs
+azure-storage-blob==12.24.0
+ # via
+ # hatch.envs.docs
+ # azure-storage-file-datalake
+azure-storage-file-datalake==12.18.0
+ # via hatch.envs.docs
+azure-storage-file-share==12.20.0
+ # via hatch.envs.docs
babel==2.16.0
# via
# pydata-sphinx-theme
@@ -19,9 +127,37 @@ babel==2.16.0
beautifulsoup4==4.12.3
# via pydata-sphinx-theme
certifi==2024.8.30
- # via requests
+ # via
+ # msrest
+ # requests
+cffi==1.17.1
+ # via cryptography
charset-normalizer==3.4.0
# via requests
+chevron==0.14.0
+ # via hatch.envs.docs
+click==8.1.7
+ # via
+ # typer
+ # typer-slim
+cryptography==44.0.0
+ # via
+ # hatch.envs.docs
+ # acme
+ # azure-identity
+ # azure-keyvault-keys
+ # azure-storage-blob
+ # azure-storage-file-share
+ # josepy
+ # msal
+ # pyjwt
+ # pyopenssl
+debugpy==1.8.9
+ # via pulumi
+dill==0.3.9
+ # via pulumi
+dnspython==2.7.0
+ # via simple-acme-dns
docutils==0.21.2
# via
# myst-parser
@@ -30,39 +166,151 @@ docutils==0.21.2
# sphinx-togglebutton
emoji==2.14.0
# via hatch.envs.docs
+fqdn==1.5.1
+ # via hatch.envs.docs
+grpcio==1.66.2
+ # via pulumi
idna==3.10
# via requests
imagesize==1.4.1
# via sphinx
+isodate==0.7.2
+ # via
+ # azure-keyvault-certificates
+ # azure-keyvault-keys
+ # azure-keyvault-secrets
+ # azure-mgmt-compute
+ # azure-mgmt-containerinstance
+ # azure-mgmt-dns
+ # azure-mgmt-keyvault
+ # azure-mgmt-resource
+ # azure-mgmt-storage
+ # azure-storage-blob
+ # azure-storage-file-datalake
+ # azure-storage-file-share
+ # msrest
jinja2==3.1.4
# via
# myst-parser
# sphinx
+josepy==1.14.0
+ # via acme
markdown-it-py==3.0.0
# via
# mdit-py-plugins
# myst-parser
+ # rich
markupsafe==3.0.2
# via jinja2
mdit-py-plugins==0.4.2
# via myst-parser
mdurl==0.1.2
# via markdown-it-py
+msal==1.31.1
+ # via
+ # azure-identity
+ # msal-extensions
+msal-extensions==1.2.0
+ # via azure-identity
+msrest==0.7.1
+ # via
+ # azure-mgmt-msi
+ # azure-mgmt-rdbms
myst-parser==4.0.0
# via hatch.envs.docs
+oauthlib==3.2.2
+ # via requests-oauthlib
packaging==24.2
# via sphinx
+parver==0.5
+ # via
+ # pulumi-azure-native
+ # pulumi-azuread
+ # pulumi-random
+portalocker==2.10.1
+ # via msal-extensions
+protobuf==4.25.5
+ # via pulumi
+psycopg==3.1.19
+ # via hatch.envs.docs
+psycopg-binary==3.1.19
+ # via psycopg
+pulumi==3.142.0
+ # via
+ # hatch.envs.docs
+ # pulumi-azure-native
+ # pulumi-azuread
+ # pulumi-random
+pulumi-azure-native==2.74.0
+ # via hatch.envs.docs
+pulumi-azuread==6.0.1
+ # via hatch.envs.docs
+pulumi-random==4.16.7
+ # via hatch.envs.docs
+pycparser==2.22
+ # via cffi
+pydantic==2.10.2
+ # via hatch.envs.docs
+pydantic-core==2.27.1
+ # via pydantic
pydata-sphinx-theme==0.16.0
# via hatch.envs.docs
pygments==2.18.0
# via
# accessible-pygments
# pydata-sphinx-theme
+ # rich
# sphinx
+pyjwt==2.10.1
+ # via
+ # hatch.envs.docs
+ # msal
+pyopenssl==24.3.0
+ # via
+ # acme
+ # josepy
+pyrfc3339==2.0.1
+ # via acme
+pytz==2024.2
+ # via
+ # hatch.envs.docs
+ # acme
pyyaml==6.0.2
- # via myst-parser
+ # via
+ # hatch.envs.docs
+ # myst-parser
+ # pulumi
requests==2.32.3
- # via sphinx
+ # via
+ # acme
+ # azure-core
+ # msal
+ # msrest
+ # requests-oauthlib
+ # sphinx
+requests-oauthlib==2.0.0
+ # via msrest
+rich==13.9.4
+ # via
+ # hatch.envs.docs
+ # typer
+ # typer-slim
+semver==2.13.0
+ # via
+ # pulumi
+ # pulumi-azure-native
+ # pulumi-azuread
+ # pulumi-random
+shellingham==1.5.4
+ # via
+ # typer
+ # typer-slim
+simple-acme-dns==3.2.0
+ # via hatch.envs.docs
+six==1.16.0
+ # via
+ # azure-core
+ # pulumi
snowballstemmer==2.2.0
# via sphinx
soupsieve==2.6
@@ -73,6 +321,7 @@ sphinx==8.1.3
# myst-parser
# pydata-sphinx-theme
# sphinx-togglebutton
+ # sphinxcontrib-typer
sphinx-togglebutton==0.3.2
# via hatch.envs.docs
sphinxcontrib-applehelp==2.0.0
@@ -87,10 +336,38 @@ sphinxcontrib-qthelp==2.0.0
# via sphinx
sphinxcontrib-serializinghtml==2.0.0
# via sphinx
+sphinxcontrib-typer==0.5.1
+ # via hatch.envs.docs
+typer==0.14.0
+ # via hatch.envs.docs
+typer-slim==0.14.0
+ # via sphinxcontrib-typer
typing-extensions==4.12.2
- # via pydata-sphinx-theme
+ # via
+ # azure-core
+ # azure-identity
+ # azure-keyvault-certificates
+ # azure-keyvault-keys
+ # azure-keyvault-secrets
+ # azure-mgmt-compute
+ # azure-mgmt-dns
+ # azure-mgmt-keyvault
+ # azure-mgmt-resource
+ # azure-storage-blob
+ # azure-storage-file-datalake
+ # azure-storage-file-share
+ # psycopg
+ # pydantic
+ # pydantic-core
+ # pydata-sphinx-theme
+ # typer
+ # typer-slim
urllib3==2.2.3
# via requests
+validators==0.34.0
+ # via simple-acme-dns
+websocket-client==1.8.0
+ # via hatch.envs.docs
wheel==0.45.1
# via sphinx-togglebutton
diff --git a/.hatch/requirements-lint.txt b/.hatch/requirements-lint.txt
index e1ab89f54e..0c10259677 100644
--- a/.hatch/requirements-lint.txt
+++ b/.hatch/requirements-lint.txt
@@ -5,9 +5,9 @@
# - ansible==11.0.0
# - black==24.10.0
# - mypy==1.13.0
-# - pandas-stubs==2.2.3.241009
-# - pydantic==2.10.1
-# - ruff==0.8.0
+# - pandas-stubs==2.2.3.241126
+# - pydantic==2.10.2
+# - ruff==0.8.1
# - types-appdirs==1.4.3.5
# - types-chevron==0.14.2.20240310
# - types-pytz==2024.2.0.20241003
@@ -29,7 +29,7 @@ ansible-compat==24.10.0
# ansible-lint
# molecule
# pytest-ansible
-ansible-core==2.18.0
+ansible-core==2.18.1
# via
# ansible
# ansible-compat
@@ -81,7 +81,7 @@ click-help-colors==0.9.4
# via molecule
colorama==0.4.6
# via tox
-cryptography==43.0.3
+cryptography==44.0.0
# via ansible-core
distlib==0.3.9
# via
@@ -151,7 +151,7 @@ packaging==24.2
# pytest
# pytest-ansible
# tox
-pandas-stubs==2.2.3.241009
+pandas-stubs==2.2.3.241126
# via hatch.envs.lint
parsley==1.3
# via bindep
@@ -178,7 +178,7 @@ ptyprocess==0.7.0
# via pexpect
pycparser==2.22
# via cffi
-pydantic==2.10.1
+pydantic==2.10.2
# via hatch.envs.lint
pydantic-core==2.27.1
# via pydantic
@@ -186,7 +186,7 @@ pygments==2.18.0
# via rich
pyproject-api==1.8.0
# via tox
-pytest==8.3.3
+pytest==8.3.4
# via
# pytest-ansible
# pytest-xdist
@@ -197,7 +197,7 @@ pytest-ansible==24.9.0
# tox-ansible
pytest-xdist==3.6.1
# via tox-ansible
-python-daemon==3.1.0
+python-daemon==3.1.2
# via ansible-runner
python-gnupg==0.5.3
# via ansible-sign
@@ -225,7 +225,7 @@ rich==13.9.4
# ansible-lint
# enrich
# molecule
-rpds-py==0.21.0
+rpds-py==0.22.1
# via
# jsonschema
# referencing
@@ -233,7 +233,7 @@ ruamel-yaml==0.18.6
# via ansible-lint
ruamel-yaml-clib==0.2.12
# via ruamel-yaml
-ruff==0.8.0
+ruff==0.8.1
# via hatch.envs.lint
subprocess-tee==0.4.2
# via
@@ -265,7 +265,7 @@ tzdata==2024.2
# via ansible-navigator
urllib3==2.2.3
# via types-requests
-virtualenv==20.27.1
+virtualenv==20.28.0
# via tox
wcmatch==10.0
# via
diff --git a/.hatch/requirements-test.txt b/.hatch/requirements-test.txt
index 8c95d7dce0..ce24e1caee 100644
--- a/.hatch/requirements-test.txt
+++ b/.hatch/requirements-test.txt
@@ -1,7 +1,7 @@
#
# This file is autogenerated by hatch-pip-compile with Python 3.12
#
-# [constraints] .hatch/requirements.txt (SHA256: 3586aa93da255077aac182009c06aa28b96ec15387beec4148e3bebd2b9f8852)
+# [constraints] .hatch/requirements.txt (SHA256: 9b78097f41c11566a80e32726aefa74a983ac227fce27db9adba04ae7594da1c)
#
# - appdirs==1.4.4
# - azure-core==1.32.0
@@ -21,25 +21,25 @@
# - azure-storage-file-datalake==12.18.0
# - azure-storage-file-share==12.20.0
# - chevron==0.14.0
-# - cryptography==43.0.3
+# - cryptography==44.0.0
# - fqdn==1.5.1
# - psycopg[binary]==3.1.19
-# - pulumi-azure-native==2.73.1
+# - pulumi-azure-native==2.74.0
# - pulumi-azuread==6.0.1
# - pulumi-random==4.16.7
-# - pulumi==3.141.0
-# - pydantic==2.10.1
-# - pyjwt[crypto]==2.10.0
+# - pulumi==3.142.0
+# - pydantic==2.10.2
+# - pyjwt[crypto]==2.10.1
# - pytz==2024.2
# - pyyaml==6.0.2
# - rich==13.9.4
# - simple-acme-dns==3.2.0
-# - typer==0.13.1
+# - typer==0.14.0
# - websocket-client==1.8.0
# - coverage==7.6.8
# - freezegun==1.5.1
# - pytest-mock==3.14.0
-# - pytest==8.3.3
+# - pytest==8.3.4
# - requests-mock==1.12.1
#
@@ -182,7 +182,7 @@ click==8.1.7
# typer
coverage==7.6.8
# via hatch.envs.test
-cryptography==43.0.3
+cryptography==44.0.0
# via
# -c .hatch/requirements.txt
# hatch.envs.test
@@ -295,14 +295,14 @@ psycopg-binary==3.1.19
# via
# -c .hatch/requirements.txt
# psycopg
-pulumi==3.141.0
+pulumi==3.142.0
# via
# -c .hatch/requirements.txt
# hatch.envs.test
# pulumi-azure-native
# pulumi-azuread
# pulumi-random
-pulumi-azure-native==2.73.1
+pulumi-azure-native==2.74.0
# via
# -c .hatch/requirements.txt
# hatch.envs.test
@@ -318,7 +318,7 @@ pycparser==2.22
# via
# -c .hatch/requirements.txt
# cffi
-pydantic==2.10.1
+pydantic==2.10.2
# via
# -c .hatch/requirements.txt
# hatch.envs.test
@@ -330,12 +330,12 @@ pygments==2.18.0
# via
# -c .hatch/requirements.txt
# rich
-pyjwt==2.10.0
+pyjwt==2.10.1
# via
# -c .hatch/requirements.txt
# hatch.envs.test
# msal
-pyopenssl==24.2.1
+pyopenssl==24.3.0
# via
# -c .hatch/requirements.txt
# acme
@@ -344,7 +344,7 @@ pyrfc3339==2.0.1
# via
# -c .hatch/requirements.txt
# acme
-pytest==8.3.3
+pytest==8.3.4
# via
# hatch.envs.test
# pytest-mock
@@ -403,7 +403,7 @@ six==1.16.0
# azure-core
# pulumi
# python-dateutil
-typer==0.13.1
+typer==0.14.0
# via
# -c .hatch/requirements.txt
# hatch.envs.test
diff --git a/.hatch/requirements.txt b/.hatch/requirements.txt
index 82ad061fc0..f2589f1f68 100644
--- a/.hatch/requirements.txt
+++ b/.hatch/requirements.txt
@@ -19,20 +19,20 @@
# - azure-storage-file-datalake==12.18.0
# - azure-storage-file-share==12.20.0
# - chevron==0.14.0
-# - cryptography==43.0.3
+# - cryptography==44.0.0
# - fqdn==1.5.1
# - psycopg[binary]==3.1.19
-# - pulumi-azure-native==2.73.1
+# - pulumi-azure-native==2.74.0
# - pulumi-azuread==6.0.1
# - pulumi-random==4.16.7
-# - pulumi==3.141.0
-# - pydantic==2.10.1
-# - pyjwt[crypto]==2.10.0
+# - pulumi==3.142.0
+# - pydantic==2.10.2
+# - pyjwt[crypto]==2.10.1
# - pytz==2024.2
# - pyyaml==6.0.2
# - rich==13.9.4
# - simple-acme-dns==3.2.0
-# - typer==0.13.1
+# - typer==0.14.0
# - websocket-client==1.8.0
#
@@ -122,7 +122,7 @@ chevron==0.14.0
# via hatch.envs.default
click==8.1.7
# via typer
-cryptography==43.0.3
+cryptography==44.0.0
# via
# hatch.envs.default
# acme
@@ -192,13 +192,13 @@ psycopg==3.1.19
# via hatch.envs.default
psycopg-binary==3.1.19
# via psycopg
-pulumi==3.141.0
+pulumi==3.142.0
# via
# hatch.envs.default
# pulumi-azure-native
# pulumi-azuread
# pulumi-random
-pulumi-azure-native==2.73.1
+pulumi-azure-native==2.74.0
# via hatch.envs.default
pulumi-azuread==6.0.1
# via hatch.envs.default
@@ -206,17 +206,17 @@ pulumi-random==4.16.7
# via hatch.envs.default
pycparser==2.22
# via cffi
-pydantic==2.10.1
+pydantic==2.10.2
# via hatch.envs.default
pydantic-core==2.27.1
# via pydantic
pygments==2.18.0
# via rich
-pyjwt==2.10.0
+pyjwt==2.10.1
# via
# hatch.envs.default
# msal
-pyopenssl==24.2.1
+pyopenssl==24.3.0
# via
# acme
# josepy
@@ -257,7 +257,7 @@ six==1.16.0
# via
# azure-core
# pulumi
-typer==0.13.1
+typer==0.14.0
# via hatch.envs.default
typing-extensions==4.12.2
# via
diff --git a/.readthedocs.yaml b/.readthedocs.yaml
index 145d005f83..56f8f2b24f 100644
--- a/.readthedocs.yaml
+++ b/.readthedocs.yaml
@@ -16,6 +16,8 @@ formats:
python:
install:
- requirements: .hatch/requirements-docs.txt
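+    # Install the data_safe_haven package itself so the {typer} CLI reference docs can import it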
+ - method: pip
+ path: .
sphinx:
configuration: docs/source/conf.py
diff --git a/data_safe_haven/commands/users.py b/data_safe_haven/commands/users.py
index fe413fa781..8c8b232ceb 100644
--- a/data_safe_haven/commands/users.py
+++ b/data_safe_haven/commands/users.py
@@ -5,6 +5,7 @@
import typer
+from data_safe_haven import console
from data_safe_haven.administration.users import UserHandler
from data_safe_haven.config import ContextManager, DSHPulumiConfig, SHMConfig, SREConfig
from data_safe_haven.exceptions import DataSafeHavenError
@@ -120,9 +121,9 @@ def register(
# Load SHMConfig
try:
shm_config = SHMConfig.from_remote(context)
- except DataSafeHavenError:
+ except DataSafeHavenError as exc:
logger.error("Have you deployed the SHM?")
- raise
+ raise typer.Exit(1) from exc
# Load Pulumi config
pulumi_config = DSHPulumiConfig.from_remote(context)
@@ -132,7 +133,7 @@ def register(
if sre_config.name not in pulumi_config.project_names:
msg = f"Could not load Pulumi settings for '{sre_config.name}'. Have you deployed the SRE?"
logger.error(msg)
- raise DataSafeHavenError(msg)
+ raise typer.Exit(1)
# Load GraphAPI
graph_api = GraphApi.from_scopes(
@@ -146,16 +147,29 @@ def register(
# List users
users = UserHandler(context, graph_api)
- available_usernames = users.get_usernames_entra_id()
+ available_users = users.entra_users.list()
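+    # Map each username to the domain part of its preferred_username (user@domain)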
+ user_dict = {
+ user.preferred_username.split("@")[0]: user.preferred_username.split("@")[1]
+ for user in available_users
+ }
usernames_to_register = []
for username in usernames:
- if username in available_usernames:
- usernames_to_register.append(username)
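+        # Only queue the user for registration if their principal domain matches this SHM's domain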
+ if user_domain := user_dict.get(username):
+ if shm_config.shm.fqdn not in user_domain:
+ console.print(
+ f"User [green]'{username}[/green]'s principal domain name is [blue]'{user_domain}'[/blue].\n"
+ f"SRE [yellow]'{sre}'[/yellow] belongs to SHM domain [blue]'{shm_config.shm.fqdn}'[/blue]."
+ )
+ logger.error(
+ "The user's principal domain name must match the domain of the SRE to be registered."
+ )
+ else:
+ usernames_to_register.append(username)
else:
logger.error(
f"Username '{username}' does not belong to this Data Safe Haven deployment."
- " Please use 'dsh users add' to create it."
)
+ console.print("Please use 'dsh users add' to create this user.")
users.register(sre_config.name, usernames_to_register)
except DataSafeHavenError as exc:
logger.critical(f"Could not register Data Safe Haven users with SRE '{sre}'.")
@@ -259,8 +273,8 @@ def unregister(
else:
logger.error(
f"Username '{username}' does not belong to this Data Safe Haven deployment."
- " Please use 'dsh users add' to create it."
)
+ console.print("Please use 'dsh users add' to create it.")
for group_name in (
f"{sre_config.name} Users",
f"{sre_config.name} Privileged Users",
diff --git a/data_safe_haven/infrastructure/components/__init__.py b/data_safe_haven/infrastructure/components/__init__.py
index f4b93b9c3d..52043d1ad3 100644
--- a/data_safe_haven/infrastructure/components/__init__.py
+++ b/data_safe_haven/infrastructure/components/__init__.py
@@ -9,6 +9,8 @@
MicrosoftSQLDatabaseProps,
NFSV3BlobContainerComponent,
NFSV3BlobContainerProps,
+ NFSV3StorageAccountComponent,
+ NFSV3StorageAccountProps,
PostgresqlDatabaseComponent,
PostgresqlDatabaseProps,
VMComponent,
@@ -23,7 +25,6 @@
)
from .wrapped import (
WrappedLogAnalyticsWorkspace,
- WrappedNFSV3StorageAccount,
)
__all__ = [
@@ -41,11 +42,12 @@
"MicrosoftSQLDatabaseProps",
"NFSV3BlobContainerComponent",
"NFSV3BlobContainerProps",
+ "NFSV3StorageAccountComponent",
+ "NFSV3StorageAccountProps",
"PostgresqlDatabaseComponent",
"PostgresqlDatabaseProps",
"SSLCertificate",
"SSLCertificateProps",
"VMComponent",
"WrappedLogAnalyticsWorkspace",
- "WrappedNFSV3StorageAccount",
]
diff --git a/data_safe_haven/infrastructure/components/composite/__init__.py b/data_safe_haven/infrastructure/components/composite/__init__.py
index bc09bc18a8..8e561dd73a 100644
--- a/data_safe_haven/infrastructure/components/composite/__init__.py
+++ b/data_safe_haven/infrastructure/components/composite/__init__.py
@@ -9,6 +9,10 @@
MicrosoftSQLDatabaseProps,
)
from .nfsv3_blob_container import NFSV3BlobContainerComponent, NFSV3BlobContainerProps
+from .nfsv3_storage_account import (
+ NFSV3StorageAccountComponent,
+ NFSV3StorageAccountProps,
+)
from .postgresql_database import PostgresqlDatabaseComponent, PostgresqlDatabaseProps
from .virtual_machine import LinuxVMComponentProps, VMComponent
@@ -23,6 +27,8 @@
"MicrosoftSQLDatabaseProps",
"NFSV3BlobContainerComponent",
"NFSV3BlobContainerProps",
+ "NFSV3StorageAccountComponent",
+ "NFSV3StorageAccountProps",
"PostgresqlDatabaseComponent",
"PostgresqlDatabaseProps",
"VMComponent",
diff --git a/data_safe_haven/infrastructure/components/composite/nfsv3_blob_container.py b/data_safe_haven/infrastructure/components/composite/nfsv3_blob_container.py
index 98564918a0..29550e9541 100644
--- a/data_safe_haven/infrastructure/components/composite/nfsv3_blob_container.py
+++ b/data_safe_haven/infrastructure/components/composite/nfsv3_blob_container.py
@@ -1,7 +1,7 @@
from pulumi import ComponentResource, Input, ResourceOptions
from pulumi_azure_native import storage
-from data_safe_haven.infrastructure.components.dynamic.blob_container_acl import (
+from data_safe_haven.infrastructure.components.dynamic import (
BlobContainerAcl,
BlobContainerAclProps,
)
@@ -52,6 +52,7 @@ def __init__(
ResourceOptions(parent=props.storage_account),
),
)
+
BlobContainerAcl(
f"{storage_container._name}_acl",
BlobContainerAclProps(
diff --git a/data_safe_haven/infrastructure/components/composite/nfsv3_storage_account.py b/data_safe_haven/infrastructure/components/composite/nfsv3_storage_account.py
new file mode 100644
index 0000000000..ca003bbd3d
--- /dev/null
+++ b/data_safe_haven/infrastructure/components/composite/nfsv3_storage_account.py
@@ -0,0 +1,144 @@
+from collections.abc import Mapping, Sequence
+
+from pulumi import ComponentResource, Input, Output, ResourceOptions
+from pulumi_azure_native import insights, storage
+
+from data_safe_haven.external import AzureIPv4Range
+from data_safe_haven.infrastructure.components.wrapped import (
+ WrappedLogAnalyticsWorkspace,
+)
+from data_safe_haven.types import AzureServiceTag
+
+
+class NFSV3StorageAccountProps:
+ def __init__(
+ self,
+ account_name: Input[str],
+ allowed_ip_addresses: Input[Sequence[str]] | None,
+ allowed_service_tag: AzureServiceTag | None,
+ location: Input[str],
+ log_analytics_workspace: Input[WrappedLogAnalyticsWorkspace],
+ resource_group_name: Input[str],
+ subnet_id: Input[str],
+ ):
+ self.account_name = account_name
+ self.allowed_ip_addresses = allowed_ip_addresses
+ self.allowed_service_tag = allowed_service_tag
+ self.location = location
+ self.log_analytics_workspace = log_analytics_workspace
+ self.resource_group_name = resource_group_name
+ self.subnet_id = subnet_id
+
+
+class NFSV3StorageAccountComponent(ComponentResource):
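+    # Encrypt both blob and file services with Microsoft-managed keys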
+ encryption_args = storage.EncryptionArgs(
+ key_source=storage.KeySource.MICROSOFT_STORAGE,
+ services=storage.EncryptionServicesArgs(
+ blob=storage.EncryptionServiceArgs(
+ enabled=True, key_type=storage.KeyType.ACCOUNT
+ ),
+ file=storage.EncryptionServiceArgs(
+ enabled=True, key_type=storage.KeyType.ACCOUNT
+ ),
+ ),
+ )
+
+ def __init__(
+ self,
+ name: str,
+ props: NFSV3StorageAccountProps,
+ opts: ResourceOptions | None = None,
+ tags: Input[Mapping[str, Input[str]]] | None = None,
+ ):
+ super().__init__("dsh:sre:NFSV3StorageAccountComponent", name, {}, opts)
+ child_opts = ResourceOptions.merge(opts, ResourceOptions(parent=self))
+ child_tags = {"component": "data"} | (tags if tags else {})
+
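+        # Decide how the storage account can be reached: allowing the Internet
+        # service tag opens access to any address; otherwise access is denied by
+        # default and each allowed CIDR range is expanded into individual IP rules.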
+ if props.allowed_service_tag == AzureServiceTag.INTERNET:
+ default_action = storage.DefaultAction.ALLOW
+ ip_rules = []
+ else:
+ default_action = storage.DefaultAction.DENY
+ ip_rules = Output.from_input(props.allowed_ip_addresses).apply(
+ lambda ip_ranges: [
+ storage.IPRuleArgs(
+ action=storage.Action.ALLOW,
+ i_p_address_or_range=str(ip_address),
+ )
+ for ip_range in sorted(ip_ranges)
+ for ip_address in AzureIPv4Range.from_cidr(ip_range).all_ips()
+ ]
+ )
+
+ # Deploy storage account
+ self.storage_account = storage.StorageAccount(
+ f"{self._name}",
+ account_name=props.account_name,
+ allow_blob_public_access=False,
+ enable_https_traffic_only=True,
+ enable_nfs_v3=True,
+ encryption=self.encryption_args,
+ is_hns_enabled=True,
+ kind=storage.Kind.BLOCK_BLOB_STORAGE,
+ location=props.location,
+ minimum_tls_version=storage.MinimumTlsVersion.TLS1_2,
+ network_rule_set=storage.NetworkRuleSetArgs(
+ bypass=storage.Bypass.AZURE_SERVICES,
+ default_action=default_action,
+ ip_rules=ip_rules,
+ virtual_network_rules=[
+ storage.VirtualNetworkRuleArgs(
+ virtual_network_resource_id=props.subnet_id,
+ )
+ ],
+ ),
+ public_network_access=storage.PublicNetworkAccess.ENABLED,
+ resource_group_name=props.resource_group_name,
+ sku=storage.SkuArgs(name=storage.SkuName.PREMIUM_ZRS),
+ opts=child_opts,
+ tags=child_tags,
+ )
+
+ # Add diagnostic setting for blobs
+ insights.DiagnosticSetting(
+ f"{self.storage_account._name}_diagnostic_setting",
+ name=f"{self.storage_account._name}_diagnostic_setting",
+ log_analytics_destination_type="Dedicated",
+ logs=[
+ {
+ "category_group": "allLogs",
+ "enabled": True,
+ "retention_policy": {
+ "days": 0,
+ "enabled": False,
+ },
+ },
+ {
+ "category_group": "audit",
+ "enabled": True,
+ "retention_policy": {
+ "days": 0,
+ "enabled": False,
+ },
+ },
+ ],
+ metrics=[
+ {
+ "category": "Transaction",
+ "enabled": True,
+ "retention_policy": {
+ "days": 0,
+ "enabled": False,
+ },
+ }
+ ],
+ resource_uri=self.storage_account.id.apply(
+ # This is the URI of the blobServices resource which is automatically
+ # created.
+ lambda resource_id: resource_id
+ + "/blobServices/default"
+ ),
+ workspace_id=props.log_analytics_workspace.id,
+ )
+
+ self.register_outputs({})
diff --git a/data_safe_haven/infrastructure/components/wrapped/__init__.py b/data_safe_haven/infrastructure/components/wrapped/__init__.py
index b449f46859..fc5f8c8f61 100644
--- a/data_safe_haven/infrastructure/components/wrapped/__init__.py
+++ b/data_safe_haven/infrastructure/components/wrapped/__init__.py
@@ -1,7 +1,5 @@
from .log_analytics_workspace import WrappedLogAnalyticsWorkspace
-from .nfsv3_storage_account import WrappedNFSV3StorageAccount
__all__ = [
"WrappedLogAnalyticsWorkspace",
- "WrappedNFSV3StorageAccount",
]
diff --git a/data_safe_haven/infrastructure/components/wrapped/nfsv3_storage_account.py b/data_safe_haven/infrastructure/components/wrapped/nfsv3_storage_account.py
deleted file mode 100644
index e259de4806..0000000000
--- a/data_safe_haven/infrastructure/components/wrapped/nfsv3_storage_account.py
+++ /dev/null
@@ -1,79 +0,0 @@
-from collections.abc import Mapping, Sequence
-
-from pulumi import Input, Output, ResourceOptions
-from pulumi_azure_native import storage
-
-from data_safe_haven.external import AzureIPv4Range
-from data_safe_haven.types import AzureServiceTag
-
-
-class WrappedNFSV3StorageAccount(storage.StorageAccount):
- encryption_args = storage.EncryptionArgs(
- key_source=storage.KeySource.MICROSOFT_STORAGE,
- services=storage.EncryptionServicesArgs(
- blob=storage.EncryptionServiceArgs(
- enabled=True, key_type=storage.KeyType.ACCOUNT
- ),
- file=storage.EncryptionServiceArgs(
- enabled=True, key_type=storage.KeyType.ACCOUNT
- ),
- ),
- )
-
- def __init__(
- self,
- resource_name: str,
- *,
- account_name: Input[str],
- allowed_ip_addresses: Input[Sequence[str]] | None,
- allowed_service_tag: AzureServiceTag | None,
- location: Input[str],
- resource_group_name: Input[str],
- subnet_id: Input[str],
- opts: ResourceOptions,
- tags: Input[Mapping[str, Input[str]]],
- ):
- if allowed_service_tag == AzureServiceTag.INTERNET:
- default_action = storage.DefaultAction.ALLOW
- ip_rules = []
- else:
- default_action = storage.DefaultAction.DENY
- ip_rules = Output.from_input(allowed_ip_addresses).apply(
- lambda ip_ranges: [
- storage.IPRuleArgs(
- action=storage.Action.ALLOW,
- i_p_address_or_range=str(ip_address),
- )
- for ip_range in sorted(ip_ranges)
- for ip_address in AzureIPv4Range.from_cidr(ip_range).all_ips()
- ]
- )
-
- self.resource_group_name_ = Output.from_input(resource_group_name)
- super().__init__(
- resource_name,
- account_name=account_name,
- allow_blob_public_access=False,
- enable_https_traffic_only=True,
- enable_nfs_v3=True,
- encryption=self.encryption_args,
- is_hns_enabled=True,
- kind=storage.Kind.BLOCK_BLOB_STORAGE,
- location=location,
- minimum_tls_version=storage.MinimumTlsVersion.TLS1_2,
- network_rule_set=storage.NetworkRuleSetArgs(
- bypass=storage.Bypass.AZURE_SERVICES,
- default_action=default_action,
- ip_rules=ip_rules,
- virtual_network_rules=[
- storage.VirtualNetworkRuleArgs(
- virtual_network_resource_id=subnet_id,
- )
- ],
- ),
- public_network_access=storage.PublicNetworkAccess.ENABLED,
- resource_group_name=resource_group_name,
- sku=storage.SkuArgs(name=storage.SkuName.PREMIUM_ZRS),
- opts=opts,
- tags=tags,
- )
diff --git a/data_safe_haven/infrastructure/programs/declarative_sre.py b/data_safe_haven/infrastructure/programs/declarative_sre.py
index 913f7c6bd7..dc3ab43ea1 100644
--- a/data_safe_haven/infrastructure/programs/declarative_sre.py
+++ b/data_safe_haven/infrastructure/programs/declarative_sre.py
@@ -163,6 +163,20 @@ def __call__(self) -> None:
),
)
+ # Deploy monitoring
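+    # (deployed before the components below that send diagnostics to its log analytics workspace)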
+ monitoring = SREMonitoringComponent(
+ "sre_monitoring",
+ self.stack_name,
+ SREMonitoringProps(
+ dns_private_zones=dns.private_zones,
+ location=self.config.azure.location,
+ resource_group_name=resource_group.name,
+ subnet=networking.subnet_monitoring,
+ timezone=self.config.sre.timezone,
+ ),
+ tags=self.tags,
+ )
+
# Deploy SRE firewall
SREFirewallComponent(
"sre_firewall",
@@ -170,6 +184,7 @@ def __call__(self) -> None:
SREFirewallProps(
allow_workspace_internet=self.config.sre.allow_workspace_internet,
location=self.config.azure.location,
+ log_analytics_workspace=monitoring.log_analytics,
resource_group_name=resource_group.name,
route_table_name=networking.route_table_name,
subnet_apt_proxy_server=networking.subnet_apt_proxy_server,
@@ -197,6 +212,7 @@ def __call__(self) -> None:
dns_record=networking.shm_ns_record,
dns_server_admin_password=dns.password_admin,
location=self.config.azure.location,
+ log_analytics_workspace=monitoring.log_analytics,
resource_group=resource_group,
sre_fqdn=networking.sre_fqdn,
storage_quota_gb_home=self.config.sre.storage_quota_gb.home,
@@ -210,20 +226,6 @@ def __call__(self) -> None:
tags=self.tags,
)
- # Deploy monitoring
- monitoring = SREMonitoringComponent(
- "sre_monitoring",
- self.stack_name,
- SREMonitoringProps(
- dns_private_zones=dns.private_zones,
- location=self.config.azure.location,
- resource_group_name=resource_group.name,
- subnet=networking.subnet_monitoring,
- timezone=self.config.sre.timezone,
- ),
- tags=self.tags,
- )
-
# Deploy the apt proxy server
apt_proxy_server = SREAptProxyServerComponent(
"sre_apt_proxy_server",
@@ -377,6 +379,7 @@ def __call__(self) -> None:
ldap_user_filter=ldap_user_filter,
ldap_user_search_base=ldap_user_search_base,
location=self.config.azure.location,
+ log_analytics_workspace=monitoring.log_analytics,
resource_group=resource_group,
software_repository_hostname=user_services.software_repositories.hostname,
subnet_desired_state=networking.subnet_desired_state,
diff --git a/data_safe_haven/infrastructure/programs/sre/data.py b/data_safe_haven/infrastructure/programs/sre/data.py
index 711b76139f..825861c122 100644
--- a/data_safe_haven/infrastructure/programs/sre/data.py
+++ b/data_safe_haven/infrastructure/programs/sre/data.py
@@ -7,6 +7,7 @@
from pulumi import ComponentResource, Input, Output, ResourceOptions
from pulumi_azure_native import (
authorization,
+ insights,
keyvault,
managedidentity,
network,
@@ -31,9 +32,11 @@
from data_safe_haven.infrastructure.components import (
NFSV3BlobContainerComponent,
NFSV3BlobContainerProps,
+ NFSV3StorageAccountComponent,
+ NFSV3StorageAccountProps,
SSLCertificate,
SSLCertificateProps,
- WrappedNFSV3StorageAccount,
+ WrappedLogAnalyticsWorkspace,
)
from data_safe_haven.types import AzureDnsZoneNames, AzureServiceTag
@@ -51,6 +54,7 @@ def __init__(
dns_record: Input[network.RecordSet],
dns_server_admin_password: Input[pulumi_random.RandomPassword],
location: Input[str],
+ log_analytics_workspace: Input[WrappedLogAnalyticsWorkspace],
resource_group: Input[resources.ResourceGroup],
sre_fqdn: Input[str],
storage_quota_gb_home: Input[int],
@@ -69,6 +73,7 @@ def __init__(
self.dns_record = dns_record
self.password_dns_server_admin = dns_server_admin_password
self.location = location
+ self.log_analytics_workspace = log_analytics_workspace
self.resource_group_id = Output.from_input(resource_group).apply(get_id_from_rg)
self.resource_group_name = Output.from_input(resource_group).apply(
get_name_from_rg
@@ -421,6 +426,45 @@ def __init__(
resource_group_name=kwargs["resource_group_name"],
)
)
+ # Add diagnostic setting for files
+ insights.DiagnosticSetting(
+ f"{storage_account_data_configuration._name}_diagnostic_setting",
+ name=f"{storage_account_data_configuration._name}_diagnostic_setting",
+ log_analytics_destination_type="Dedicated",
+ logs=[
+ {
+ "category_group": "allLogs",
+ "enabled": True,
+ "retention_policy": {
+ "days": 0,
+ "enabled": False,
+ },
+ },
+ {
+ "category_group": "audit",
+ "enabled": True,
+ "retention_policy": {
+ "days": 0,
+ "enabled": False,
+ },
+ },
+ ],
+ metrics=[
+ {
+ "category": "Transaction",
+ "enabled": True,
+ "retention_policy": {
+ "days": 0,
+ "enabled": False,
+ },
+ }
+ ],
+ # This is the URI of the automatically created fileService resource
+ resource_uri=Output.concat(
+ storage_account_data_configuration.id, "/fileServices/default"
+ ),
+ workspace_id=props.log_analytics_workspace.id,
+ )
# Set up a private endpoint for the configuration data storage account
storage_account_data_configuration_private_endpoint = network.PrivateEndpoint(
f"{storage_account_data_configuration._name}_private_endpoint",
@@ -467,20 +511,26 @@ def __init__(
# Deploy sensitive data blob storage account
# - This holds the /mnt/input and /mnt/output containers that are mounted by workspaces
# - Azure blobs have worse NFS support but can be accessed with Azure Storage Explorer
- storage_account_data_private_sensitive = WrappedNFSV3StorageAccount(
+ component_data_private_sensitive = NFSV3StorageAccountComponent(
f"{self._name}_storage_account_data_private_sensitive",
- # Storage account names have a maximum of 24 characters
- account_name=alphanumeric(
- f"{''.join(truncate_tokens(stack_name.split('-'), 11))}sensitivedata{sha256hash(self._name)}"
- )[:24],
- allowed_ip_addresses=data_private_sensitive_ip_addresses,
- allowed_service_tag=data_private_sensitive_service_tag,
- location=props.location,
- subnet_id=props.subnet_data_private_id,
- resource_group_name=props.resource_group_name,
+ NFSV3StorageAccountProps(
+ # Storage account names have a maximum of 24 characters
+ account_name=alphanumeric(
+ f"{''.join(truncate_tokens(stack_name.split('-'), 11))}sensitivedata{sha256hash(self._name)}"
+ )[:24],
+ allowed_ip_addresses=data_private_sensitive_ip_addresses,
+ allowed_service_tag=data_private_sensitive_service_tag,
+ location=props.location,
+ log_analytics_workspace=props.log_analytics_workspace,
+ subnet_id=props.subnet_data_private_id,
+ resource_group_name=props.resource_group_name,
+ ),
opts=child_opts,
tags=child_tags,
)
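+    # Keep a direct reference to the storage account for the blob containers deployed below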
+ storage_account_data_private_sensitive = (
+ component_data_private_sensitive.storage_account
+ )
# Deploy storage containers
NFSV3BlobContainerComponent(
f"{self._name}_blob_egress",
@@ -615,6 +665,45 @@ def __init__(
opts=child_opts,
tags=child_tags,
)
+ # Add diagnostic setting for files
+ insights.DiagnosticSetting(
+ f"{storage_account_data_private_user._name}_diagnostic_setting",
+ name=f"{storage_account_data_private_user._name}_diagnostic_setting",
+ log_analytics_destination_type="Dedicated",
+ logs=[
+ {
+ "category_group": "allLogs",
+ "enabled": True,
+ "retention_policy": {
+ "days": 0,
+ "enabled": False,
+ },
+ },
+ {
+ "category_group": "audit",
+ "enabled": True,
+ "retention_policy": {
+ "days": 0,
+ "enabled": False,
+ },
+ },
+ ],
+ metrics=[
+ {
+ "category": "Transaction",
+ "enabled": True,
+ "retention_policy": {
+ "days": 0,
+ "enabled": False,
+ },
+ }
+ ],
+ # This is the URI of the automatically created fileService resource
+ resource_uri=Output.concat(
+ storage_account_data_private_user.id, "/fileServices/default"
+ ),
+ workspace_id=props.log_analytics_workspace.id,
+ )
storage.FileShare(
f"{storage_account_data_private_user._name}_files_home",
access_tier=storage.ShareAccessTier.PREMIUM,
diff --git a/data_safe_haven/infrastructure/programs/sre/desired_state.py b/data_safe_haven/infrastructure/programs/sre/desired_state.py
index c4392f5210..20f4e357f1 100644
--- a/data_safe_haven/infrastructure/programs/sre/desired_state.py
+++ b/data_safe_haven/infrastructure/programs/sre/desired_state.py
@@ -31,7 +31,9 @@
from data_safe_haven.infrastructure.components import (
NFSV3BlobContainerComponent,
NFSV3BlobContainerProps,
- WrappedNFSV3StorageAccount,
+ NFSV3StorageAccountComponent,
+ NFSV3StorageAccountProps,
+ WrappedLogAnalyticsWorkspace,
)
from data_safe_haven.resources import resources_path
from data_safe_haven.types import AzureDnsZoneNames
@@ -55,6 +57,7 @@ def __init__(
ldap_user_filter: Input[str],
ldap_user_search_base: Input[str],
location: Input[str],
+ log_analytics_workspace: Input[WrappedLogAnalyticsWorkspace],
resource_group: Input[resources.ResourceGroup],
software_repository_hostname: Input[str],
subscription_name: Input[str],
@@ -73,6 +76,7 @@ def __init__(
self.ldap_user_filter = ldap_user_filter
self.ldap_user_search_base = ldap_user_search_base
self.location = location
+ self.log_analytics_workspace = log_analytics_workspace
self.resource_group_id = Output.from_input(resource_group).apply(get_id_from_rg)
self.resource_group_name = Output.from_input(resource_group).apply(
get_name_from_rg
@@ -102,19 +106,23 @@ def __init__(
# Deploy desired state storage account
# - This holds the /var/local/ansible container that is mounted by workspaces
# - Azure blobs have worse NFS support but can be accessed with Azure Storage Explorer
- storage_account = WrappedNFSV3StorageAccount(
+ storage_component = NFSV3StorageAccountComponent(
f"{self._name}_storage_account",
- account_name=alphanumeric(
- f"{''.join(truncate_tokens(stack_name.split('-'), 11))}desiredstate{sha256hash(self._name)}"
- )[:24],
- allowed_ip_addresses=props.admin_ip_addresses,
- allowed_service_tag=None,
- location=props.location,
- resource_group_name=props.resource_group_name,
- subnet_id=props.subnet_desired_state_id,
+ NFSV3StorageAccountProps(
+ account_name=alphanumeric(
+ f"{''.join(truncate_tokens(stack_name.split('-'), 11))}desiredstate{sha256hash(self._name)}"
+ )[:24],
+ allowed_ip_addresses=props.admin_ip_addresses,
+ allowed_service_tag=None,
+ location=props.location,
+ log_analytics_workspace=props.log_analytics_workspace,
+ resource_group_name=props.resource_group_name,
+ subnet_id=props.subnet_desired_state_id,
+ ),
opts=child_opts,
tags=child_tags,
)
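+    # Keep a direct reference to the storage account for the desired state container below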
+ storage_account = storage_component.storage_account
# Deploy desired state share
container_desired_state = NFSV3BlobContainerComponent(
f"{self._name}_blob_desired_state",
diff --git a/data_safe_haven/infrastructure/programs/sre/firewall.py b/data_safe_haven/infrastructure/programs/sre/firewall.py
index ab873491e6..a256ed749f 100644
--- a/data_safe_haven/infrastructure/programs/sre/firewall.py
+++ b/data_safe_haven/infrastructure/programs/sre/firewall.py
@@ -3,12 +3,13 @@
from collections.abc import Mapping
from pulumi import ComponentResource, Input, Output, ResourceOptions
-from pulumi_azure_native import network
+from pulumi_azure_native import insights, network
from data_safe_haven.infrastructure.common import (
get_address_prefixes_from_subnet,
get_id_from_subnet,
)
+from data_safe_haven.infrastructure.components import WrappedLogAnalyticsWorkspace
from data_safe_haven.types import (
FirewallPriorities,
ForbiddenDomains,
@@ -25,6 +26,7 @@ def __init__(
*,
allow_workspace_internet: bool,
location: Input[str],
+ log_analytics_workspace: Input[WrappedLogAnalyticsWorkspace],
resource_group_name: Input[str],
route_table_name: Input[str],
subnet_apt_proxy_server: Input[network.GetSubnetResult],
@@ -38,6 +40,7 @@ def __init__(
) -> None:
self.allow_workspace_internet = allow_workspace_internet
self.location = location
+ self.log_analytics_workspace = log_analytics_workspace
self.resource_group_name = resource_group_name
self.route_table_name = route_table_name
self.subnet_apt_proxy_server_prefixes = Output.from_input(
@@ -365,6 +368,36 @@ def __init__(
tags=child_tags,
)
+ # Add diagnostic settings for firewall
+ # This links the firewall to the log analytics workspace
+ insights.DiagnosticSetting(
+ f"{self._name}_firewall_diagnostic_settings",
+ name="firewall_diagnostic_settings",
+ log_analytics_destination_type="Dedicated",
+ logs=[
+ {
+ "category_group": "allLogs",
+ "enabled": True,
+ "retention_policy": {
+ "days": 0,
+ "enabled": False,
+ },
+ },
+ ],
+ metrics=[
+ {
+ "category": "AllMetrics",
+ "enabled": True,
+ "retention_policy": {
+ "days": 0,
+ "enabled": False,
+ },
+ }
+ ],
+ resource_uri=firewall.id,
+ workspace_id=props.log_analytics_workspace.id,
+ )
+
# Retrieve the private IP address for the firewall
private_ip_address = self.firewall.ip_configurations.apply(
lambda cfgs: "" if not cfgs else cfgs[0].private_ip_address
diff --git a/docs/source/conf.py b/docs/source/conf.py
index f262d36dc2..a1e2c34385 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -55,6 +55,7 @@
extensions = [
"myst_parser",
"sphinx_togglebutton",
+ "sphinxcontrib.typer",
]
# Add any paths that contain templates here, relative to this directory.
diff --git a/docs/source/deployment/deploy_shm.md b/docs/source/deployment/deploy_shm.md
index b26d451bfb..ef6b7090f5 100644
--- a/docs/source/deployment/deploy_shm.md
+++ b/docs/source/deployment/deploy_shm.md
@@ -54,7 +54,7 @@ Before deploying the Safe Haven Management (SHM) infrastructure you need to deci
Use the short name without spaces, _e.g._ **uksouth** not **UK South**
:::
-Once you've decided on these, run the following command: [approx 5 minutes]:
+Once you've decided on these, run the {typer}`dsh-shm-deploy` command [approx 5 minutes]:
:::{code} shell
$ dsh shm deploy --entra-tenant-id YOUR_ENTRA_TENANT_ID \
diff --git a/docs/source/deployment/deploy_sre.md b/docs/source/deployment/deploy_sre.md
index 5a5a5b4166..ebf1aa425a 100644
--- a/docs/source/deployment/deploy_sre.md
+++ b/docs/source/deployment/deploy_sre.md
@@ -21,6 +21,7 @@ $ az provider register --name Microsoft.Network
Each project will have its own dedicated SRE.
- Create a configuration file (optionally starting from one of our standard {ref}`policy_classification_sensitivity_tiers`)
+- The {typer}`dsh-config-template` command provides template configuration files
::::{admonition} EITHER start from a blank template
:class: dropdown note
@@ -254,7 +255,7 @@ If you want to make changes to the config, edit this file and then run `dsh conf
## Deployment
-- Deploy each SRE individually [approx 30 minutes]:
+- Deploy each SRE individually using {typer}`dsh sre deploy` [approx 30 minutes]:
:::{code} shell
$ dsh sre deploy YOUR_SRE_NAME
diff --git a/docs/source/index.md b/docs/source/index.md
index 764b06ec38..2c3be84b84 100644
--- a/docs/source/index.md
+++ b/docs/source/index.md
@@ -9,6 +9,7 @@ design/index.md
deployment/index.md
management/index.md
roles/index.md
+reference/index.md
contributing/index.md
:::
@@ -42,6 +43,8 @@ You can read them through in order or simply jump to the section that you are mo
- If you want to deploy your own Data Safe Haven.
- [**Management**](management/index.md)
- If you want to manage a Data Safe Haven that you (or someone else) has deployed.
+- [**CLI Reference**](reference/index.md)
+ - If you want details of all the Command Line Interface commands.
- [**Roles**](roles/index.md)
- Information about the different user roles in the Data Safe Haven.
- Instructions and advice for the actions of different user roles.
diff --git a/docs/source/management/logs.md b/docs/source/management/logs.md
index f9a9948453..10b9bfb0e5 100644
--- a/docs/source/management/logs.md
+++ b/docs/source/management/logs.md
@@ -7,10 +7,71 @@ Some of these logs are ingested into a central location, an Azure [Log Analytics
Each SRE has its own Log Analytics Workspace.
You can view the workspaces by going to the Azure portal and navigating to [Log Analytics Workspaces](https://portal.azure.com/#browse/Microsoft.OperationalInsights%2Fworkspaces).
-Select which log workspace you want to view by clicking on the workspace named `shm--sre--log`.
+Select which Log Analytics Workspace you want to view by clicking on the workspace named `shm--sre--log`.
The logs can be filtered using [Kusto Query Language (KQL)](https://learn.microsoft.com/en-us/azure/azure-monitor/logs/log-query-overview).
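+
+If you prefer to run queries programmatically rather than through the portal, a sketch along the following lines may be helpful.
+It is an illustration only: the `azure-monitor-query` and `azure-identity` packages are not Data Safe Haven dependencies, and the workspace ID shown is a placeholder for the ID displayed on the workspace overview page.
+
+```{code} python
+# Minimal sketch (assumptions noted above): run a KQL query against a Log Analytics Workspace.
+# Requires being signed in (e.g. via the Azure CLI) so that DefaultAzureCredential can authenticate.
+from datetime import timedelta
+
+from azure.identity import DefaultAzureCredential
+from azure.monitor.query import LogsQueryClient
+
+# Placeholder: replace with the workspace ID shown on the Log Analytics Workspace overview page
+WORKSPACE_ID = "00000000-0000-0000-0000-000000000000"
+
+client = LogsQueryClient(DefaultAzureCredential())
+response = client.query_workspace(
+    workspace_id=WORKSPACE_ID,
+    query="AzureMetrics | take 10",  # any of the KQL queries discussed below
+    timespan=timedelta(days=1),
+)
+# Assumes the query succeeds; partial results are not handled in this sketch
+for table in response.tables:
+    for row in table.rows:
+        print(row)
+```
+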
+## Storage logs
+
+The logs available for each part of Data Safe Haven storage depend on how that storage is provisioned.
+
+### Sensitive data logs
+
+The sensitive data containers are the [ingress and egress containers](./data.md).
+Logs from these containers are ingested into the [SRE's log analytics workspace](#log-workspace).
+There are two tables,
+
+`StorageBlobLogs`
+: Events occurring on the blob containers.
+: For example, data being uploaded, extracted or read.
+
+`AzureMetrics`
+: Various metrics on blob container utilisation and performance.
+: This table is not reserved for the sensitive data containers and other resources may log to it.
+
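+For example, a query along the lines sketched below lists recent operations on these containers.
+It is illustrative only: the column names are taken from the Azure Monitor `StorageBlobLogs` schema and should be checked against your workspace.
+
+```{code} python
+# Illustrative only: KQL for recent blob operations on the sensitive data containers.
+# Paste the string into the Log Analytics query editor, or pass it to the client sketched above.
+query = """
+StorageBlobLogs
+| where TimeGenerated > ago(1d)
+| project TimeGenerated, OperationName, Uri, CallerIpAddress, StatusText
+| order by TimeGenerated desc
+"""
+```
+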
+### Desired state data logs
+
+The desired state container holds the data necessary to configure virtual machines in an SRE.
+Logs from the desired state container are ingested into the [SRE's log analytics workspace](#log-workspace).
+There are two tables,
+
+`StorageBlobLogs`
+: Events occurring on the blob containers.
+: For example, data being uploaded, extracted or read.
+
+`AzureMetrics`
+: Various metrics on blob container utilisation and performance.
+: This table is not reserved for the desired state data container and other resources may log to it.
+
+### User data logs
+
+The user data file share holds the {ref}`researchers'` [home directories](https://refspecs.linuxfoundation.org/FHS_3.0/fhs/ch03s08.html), where they will store their personal data and configuration.
+Logs from the share are ingested into the [SRE's log analytics workspace](#log-workspace).
+There are two tables,
+
+`StorageFileLogs`
+: NFS events occurring on the file share.
+: For example, data being written or directories being accessed.
+
+`AzureMetrics`
+: Various metrics on file share utilisation and performance.
+: This table is not reserved for the user data share and other resources may log to it.
+
+### Configuration data logs
+
+There are multiple configuration data file shares.
+Each contains the configuration and state data for the Data Safe Haven [services deployed as containers](#container-logs).
+Logs from the share are ingested into the [SRE's log analytics workspace](#log-workspace).
+There are two tables,
+
+`StorageFileLogs`
+: SMB events occurring on the file share.
+: For example, data being written or directories being accessed.
+
+`AzureMetrics`
+: Various metrics on file share utilisation and performance.
+: This table is not reserved for the configuration data shares and other resources may log to it.
+
## Container logs
Some of the Data Safe Haven infrastructure is provisioned as containers.
@@ -20,8 +81,8 @@ These include,
- package proxy
- Gitea and Hedgedoc
-Logs from all containers are ingested into the [SREs log workspace](#log-workspace).
-There are two logs
+Logs from all containers are ingested into the [SRE's log analytics workspace](#log-workspace).
+There are two tables,
`ContainerEvents_CL`
: Event logs for the container instance resources such as starting, stopping, crashes and pulling images.
@@ -29,3 +90,39 @@ There are two logs
`ContainerInstanceLog_CL`
: Container process logs.
: This is where you can view the output of the containerised applications and will be useful for debugging problems.
+
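+When debugging a containerised application, a query along the lines sketched below pulls its most recent output.
+It is illustrative only: apart from `TimeGenerated`, check the columns of this custom table against your workspace.
+
+```{code} python
+# Illustrative only: KQL for the most recent container process logs.
+# Paste the string into the Log Analytics query editor, or pass it to the client sketched earlier.
+query = """
+ContainerInstanceLog_CL
+| order by TimeGenerated desc
+| take 50
+"""
+```
+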
+## Workspace logs
+
+Logs from all user workspaces are ingested into the [SRE's log analytics workspace](#log-workspace) using the [Azure Monitor Agent](https://learn.microsoft.com/en-us/azure/azure-monitor/agents/azure-monitor-agent-overview).
+
+There are three tables,
+
+`Perf`
+: Usage statistics for individual workspaces, such as percent memory used and percent disk space used.
+
+`Syslog`
+: [syslog](https://www.paessler.com/it-explained/syslog) events from workspaces.
+: Syslog is the _de facto_ standard protocol for logging on Linux and most applications will log to it.
+: These logs will be useful for debugging problems with the workspace or workspace software.
+
+`Heartbeat`
+: Verification that the Azure Monitor Agent is present on the workspaces and is able to connect to the [log analytics workspace](#log-workspace).
+
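+As an illustration, a query along the lines sketched below surfaces recent error-level syslog messages.
+It is illustrative only: the column names are taken from the standard Azure Monitor `Syslog` schema and should be checked against your workspace.
+
+```{code} python
+# Illustrative only: KQL for recent error-level syslog messages from workspaces.
+# Paste the string into the Log Analytics query editor, or pass it to the client sketched earlier.
+query = """
+Syslog
+| where TimeGenerated > ago(1d)
+| where SeverityLevel == "err"
+| project TimeGenerated, Computer, ProcessName, SyslogMessage
+| order by TimeGenerated desc
+"""
+```
+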
+## Firewall logs
+
+The firewall plays a critical role in the security of a Data Safe Haven.
+It filters all outbound traffic through a set of FQDN rules so that each component may only reach necessary and allowed domains.
+
+Logs from the firewall are ingested into the [SRE's log analytics workspace](#log-workspace).
+There are three tables,
+
+`AZFWApplicationRule`
+: Logs from the firewall's FQDN filters.
+: Shows outbound requests from the Data Safe Haven and why they were approved or rejected.
+
+`AZFWDnsQuery`
+: DNS requests handled by the firewall.
+
+`AzureMetrics`
+: Various metrics on firewall utilisation and performance.
+: This table is not reserved for the firewall and other resources may log to it.
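+
+As an illustration, a query along the lines sketched below lists recent outbound requests that the firewall denied.
+It is illustrative only: the column names are taken from the Azure Monitor `AZFWApplicationRule` schema and should be checked against your workspace.
+
+```{code} python
+# Illustrative only: KQL for recently denied outbound requests.
+# Paste the string into the Log Analytics query editor, or pass it to the client sketched earlier.
+query = """
+AZFWApplicationRule
+| where TimeGenerated > ago(1d)
+| where Action == "Deny"
+| project TimeGenerated, SourceIp, Fqdn, Action
+| order by TimeGenerated desc
+"""
+```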
diff --git a/docs/source/management/sre.md b/docs/source/management/sre.md
index aab94c31a1..fddaa5e135 100644
--- a/docs/source/management/sre.md
+++ b/docs/source/management/sre.md
@@ -2,13 +2,13 @@
## List available SRE configurations and deployment status
-- Run the following if you want to check what SRE configurations are available in the current context, and whether those SREs are deployed
+- Use {typer}`dsh config available` to check what SRE configurations are available in the current context, and whether those SREs are deployed.
```{code} shell
$ dsh config available
```
-which will give output like the following
+This will give output like the following
```{code} shell
Available SRE configurations for context 'green':
@@ -23,7 +23,7 @@ Available SRE configurations for context 'green':
## Remove a deployed Data Safe Haven
-- Run the following if you want to teardown a deployed SRE:
+- Use {typer}`dsh sre teardown` to tear down a deployed SRE:
```{code} shell
$ dsh sre teardown YOUR_SRE_NAME
@@ -37,7 +37,7 @@ Ensure that any desired outputs have been extracted before deleting the SRE.
The user groups for the SRE on Microsoft Entra ID will also be deleted.
::::
-- Run the following if you want to teardown the deployed SHM:
+- Use {typer}`dsh shm teardown` if you want to tear down the deployed SHM:
```{code} shell
$ dsh shm teardown
@@ -54,7 +54,7 @@ All SREs associated with the SHM should be torn down before the SHM is torn down
SREs are modified by updating the configuration then running the deploy command.
-- The existing configuration for the SRE can be shown using the following:
+- The existing configuration for the SRE can be shown using {typer}`dsh config show`:
```{code} shell
$ dsh config show YOUR_SRE_NAME
@@ -66,14 +66,14 @@ $ dsh config show YOUR_SRE_NAME
$ dsh config show YOUR_SRE_NAME --file YOUR_SRE_NAME.yaml
```
-- Edit the configuration file locally, and upload the new version:
+- Edit the configuration file locally, and upload the new version using {typer}`dsh config upload`:
```{code} shell
$ dsh config upload YOUR_SRE_NAME.yaml
```
- You will be shown the differences between the existing configuration and the new configuration and asked to confirm that they are correct.
-- Finally, deploy your SRE to apply any changes:
+- Finally, deploy your SRE using {typer}`dsh sre deploy` to apply any changes:
```{code} shell
$ dsh sre deploy YOUR_SRE_NAME
diff --git a/docs/source/management/user.md b/docs/source/management/user.md
index d996321162..337af2302f 100644
--- a/docs/source/management/user.md
+++ b/docs/source/management/user.md
@@ -7,7 +7,7 @@ You will need a full name, phone number, email address and country for each user
:::
1. You can add users directly in your Entra tenant, following the instructions [here](https://learn.microsoft.com/en-us/entra/fundamentals/how-to-create-delete-users).
-1. Alternatively, you can add multiple users from a CSV file with columns named (`GivenName`, `Surname`, `Phone`, `Email`, `CountryCode`).
+1. Alternatively, you can use {typer}`dsh users add` to add multiple users from a CSV file with columns named (`GivenName`, `Surname`, `Phone`, `Email`, `CountryCode`).
- (Optional) you can provide a `Domain` column if you like but this will otherwise default to the domain of your SHM
- {{warning}} **Phone** must be in [E.123 international format](https://en.wikipedia.org/wiki/E.123)
- {{warning}} **CountryCode** is the two letter [ISO 3166-1 Alpha-2](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2#Officially_assigned_code_elements) code for the country where the user is based
@@ -32,7 +32,7 @@ $ dsh users add PATH_TO_MY_CSV_FILE
1. Browse to **{menuselection}`Groups --> All Groups`**
1. Click on the group named **Data Safe Haven SRE _YOUR\_SRE\_NAME_ Users**
1. Browse to **{menuselection}`Manage --> Members`** from the secondary menu on the left side
-- You can do this at the command line by running the following command:
+- You can do this at the command line by running {typer}`dsh users list`:
```{code} shell
$ dsh users list YOUR_SRE_NAME
@@ -54,7 +54,7 @@ $ dsh users add PATH_TO_MY_CSV_FILE
## Assign existing users to an SRE
1. You can do this directly in your Entra tenant by adding them to the **Data Safe Haven SRE _YOUR\_SRE\_NAME_ Users** group, following the instructions [here](https://learn.microsoft.com/en-us/entra/fundamentals/groups-view-azure-portal#add-a-group-member).
-1. Alternatively, you can add multiple users from the command line:
+1. Alternatively, you can add multiple users from the command line using {typer}`dsh users register`:
```{code} shell
$ dsh users register YOUR_SRE_NAME -u USERNAME_1 -u USERNAME_2
@@ -69,7 +69,7 @@ $ dsh users add PATH_TO_MY_CSV_FILE
## Manually register users for self-service password reset
:::{tip}
-Users created via the `dsh users` command line tool will be automatically registered for SSPR.
+Users created via the {typer}`dsh users` command line tool will be automatically registered for SSPR.
:::
If you have manually created a user and want to enable SSPR, do the following
diff --git a/docs/source/overview/sensitivity_tiers.md b/docs/source/overview/sensitivity_tiers.md
index 4aef9a32fe..995be6ab87 100644
--- a/docs/source/overview/sensitivity_tiers.md
+++ b/docs/source/overview/sensitivity_tiers.md
@@ -49,7 +49,7 @@ Non-technical restrictions related to information governance procedures may also
- connections to the in-browser remote desktop can only be made from an agreed set of IP addresses
- outbound connections to the internet from inside the environment are not possible
- copy-and-paste between the environment and the user's device is not possible
-- access to all packages on PyPI and CRAN is made available through a proxy or mirror server
+- access to all packages on PyPI and CRAN is made available through a proxy server
Non-technical restrictions related to information governance procedures may also be applied according to your organisation's needs.
@@ -63,7 +63,7 @@ At the Turing connections to Tier 2 environments are only permitted from **Organ
**Tier 3** environments impose the following technical controls on top of what is required at {ref}`policy_tier_2`.
-- a partial replica of agreed PyPI and CRAN packages is made available through a proxy or mirror server
+- an agreed subset of PyPI and CRAN packages is made available through a proxy server
Non-technical restrictions related to information governance procedures may also be applied according to your organisation's needs.
diff --git a/docs/source/reference/config.md b/docs/source/reference/config.md
new file mode 100644
index 0000000000..daffc4e2e8
--- /dev/null
+++ b/docs/source/reference/config.md
@@ -0,0 +1,10 @@
+# `config`
+
+`dsh config` commands are used to manage the configuration files that define SHMs and SREs.
+
+:::{typer} data_safe_haven.commands.config:config_command_group
+:width: 65
+:prog: dsh config
+:show-nested:
+:make-sections:
+:::
diff --git a/docs/source/reference/context.md b/docs/source/reference/context.md
new file mode 100644
index 0000000000..76b72a4ca0
--- /dev/null
+++ b/docs/source/reference/context.md
@@ -0,0 +1,10 @@
+# `context`
+
+`dsh context` commands are used to manage Data Safe Haven contexts, the groupings within which a single SHM and its associated SREs are organised.
+
+:::{typer} data_safe_haven.commands.context:context_command_group
+:width: 65
+:prog: dsh context
+:show-nested:
+:make-sections:
+:::
diff --git a/docs/source/reference/index.md b/docs/source/reference/index.md
new file mode 100644
index 0000000000..6bcb99fd2e
--- /dev/null
+++ b/docs/source/reference/index.md
@@ -0,0 +1,44 @@
+# CLI Reference
+
+:::{toctree}
+:hidden:
+
+config.md
+context.md
+users.md
+pulumi.md
+shm.md
+sre.md
+:::
+
+A Data Safe Haven is managed using the `dsh` command line interface.
+A full guide to the commands available for managing your Data Safe Haven is provided here.
+
+All commands begin with `dsh`, the entrypoint to the Data Safe Haven command line interface.
+
+:::{typer} data_safe_haven.commands.cli:application
+:prog: dsh
+:width: 65
+:::
+
+The subcommands can be used to manage various aspects of a Data Safe Haven deployment.
+For further detail on each subcommand, navigate to the relevant page.
+
+[Config](config.md)
+: Management of the configuration files used to define SHMs and SREs
+
+[Context](context.md)
+: Management of DSH contexts, the groupings that encompass an SHM and its associated SREs
+
+[Users](users.md)
+: Management of users in Entra ID
+
+[Pulumi](pulumi.md)
+: An interface to the Pulumi command line interface
+
+[shm](shm.md)
+: Management of infrastructure for DSH Safe Haven Management environments
+
+[sre](sre.md)
+: Management of infrastructure for DSH Secure Research Environments
diff --git a/docs/source/reference/pulumi.md b/docs/source/reference/pulumi.md
new file mode 100644
index 0000000000..e6cb051860
--- /dev/null
+++ b/docs/source/reference/pulumi.md
@@ -0,0 +1,10 @@
+# `pulumi`
+
+`dsh pulumi` commands allow you to run native Pulumi commands to interact with an SRE stack.
+
+:::{typer} data_safe_haven.commands.pulumi:pulumi_command_group
+:width: 65
+:prog: dsh pulumi
+:show-nested:
+:make-sections:
+:::
diff --git a/docs/source/reference/shm.md b/docs/source/reference/shm.md
new file mode 100644
index 0000000000..0bb9f09105
--- /dev/null
+++ b/docs/source/reference/shm.md
@@ -0,0 +1,10 @@
+# `shm`
+
+`dsh shm` commands are used to deploy or tear down DSH Safe Haven Management infrastructure.
+
+:::{typer} data_safe_haven.commands.shm:shm_command_group
+:width: 65
+:prog: dsh shm
+:show-nested:
+:make-sections:
+:::
diff --git a/docs/source/reference/sre.md b/docs/source/reference/sre.md
new file mode 100644
index 0000000000..03e478efe3
--- /dev/null
+++ b/docs/source/reference/sre.md
@@ -0,0 +1,10 @@
+# `sre`
+
+`dsh sre` commands are used to deploy or tear down the infrastructure for DSH Secure Research Environments.
+
+:::{typer} data_safe_haven.commands.sre:sre_command_group
+:width: 65
+:prog: dsh sre
+:show-nested:
+:make-sections:
+:::
diff --git a/docs/source/reference/users.md b/docs/source/reference/users.md
new file mode 100644
index 0000000000..9b198ce5ba
--- /dev/null
+++ b/docs/source/reference/users.md
@@ -0,0 +1,10 @@
+# `users`
+
+`dsh users` commands are used to manage users in the Entra ID tenant associated with a DSH deployment.
+
+:::{typer} data_safe_haven.commands.users:users_command_group
+:width: 65
+:prog: dsh users
+:show-nested:
+:make-sections:
+:::
diff --git a/pyproject.toml b/pyproject.toml
index 49099b230d..8042b11eb8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -42,20 +42,20 @@ dependencies = [
"azure-storage-file-datalake==12.18.0",
"azure-storage-file-share==12.20.0",
"chevron==0.14.0",
- "cryptography==43.0.3",
+ "cryptography==44.0.0",
"fqdn==1.5.1",
"psycopg[binary]==3.1.19", # needed for installation on older MacOS versions
- "pulumi-azure-native==2.73.1",
+ "pulumi-azure-native==2.74.0",
"pulumi-azuread==6.0.1",
"pulumi-random==4.16.7",
- "pulumi==3.141.0",
- "pydantic==2.10.1",
- "pyjwt[crypto]==2.10.0",
+ "pulumi==3.142.0",
+ "pydantic==2.10.2",
+ "pyjwt[crypto]==2.10.1",
"pytz==2024.2",
"pyyaml==6.0.2",
"rich==13.9.4",
"simple-acme-dns==3.2.0",
- "typer==0.13.1",
+ "typer==0.14.0",
"websocket-client==1.8.0",
]
@@ -71,15 +71,16 @@ docs = [
"pydata-sphinx-theme==0.16.0",
"sphinx-togglebutton==0.3.2",
"sphinx==8.1.3",
+ "sphinxcontrib-typer==0.5.1",
]
lint = [
"ansible-dev-tools==24.11.0",
"ansible==11.0.0",
"black==24.10.0",
"mypy==1.13.0",
- "pandas-stubs==2.2.3.241009",
- "pydantic==2.10.1",
- "ruff==0.8.0",
+ "pandas-stubs==2.2.3.241126",
+ "pydantic==2.10.2",
+ "ruff==0.8.1",
"types-appdirs==1.4.3.5",
"types-chevron==0.14.2.20240310",
"types-pytz==2024.2.0.20241003",
@@ -90,7 +91,7 @@ test = [
"coverage==7.6.8",
"freezegun==1.5.1",
"pytest-mock==3.14.0",
- "pytest==8.3.3",
+ "pytest==8.3.4",
"requests-mock==1.12.1",
]
@@ -120,7 +121,6 @@ lock-filename = ".hatch/requirements.txt"
[tool.hatch.envs.docs]
type = "pip-compile"
lock-filename = ".hatch/requirements-docs.txt"
-detached = true
features = ["docs"]
[tool.hatch.envs.docs.scripts]
diff --git a/tests/commands/conftest.py b/tests/commands/conftest.py
index d675398bfc..de60eb29d0 100644
--- a/tests/commands/conftest.py
+++ b/tests/commands/conftest.py
@@ -1,6 +1,8 @@
from pytest import fixture
from typer.testing import CliRunner
+from data_safe_haven.administration.users.entra_users import EntraUsers
+from data_safe_haven.administration.users.research_user import ResearchUser
from data_safe_haven.config import (
Context,
ContextManager,
@@ -260,3 +262,14 @@ def tmp_contexts_none(tmp_path, context_yaml):
with open(config_file_path, "w") as f:
f.write(context_yaml)
return tmp_path
+
+
+@fixture
+def mock_entra_user_list(mocker):
+ test_user = ResearchUser(
+ given_name="Harry",
+ surname="Lime",
+ sam_account_name="harry.lime",
+ user_principal_name="harry.lime@acme.testing",
+ )
+ mocker.patch.object(EntraUsers, "list", return_value=[test_user])
diff --git a/tests/commands/test_users.py b/tests/commands/test_users.py
index c1b183c922..5c11e29cc9 100644
--- a/tests/commands/test_users.py
+++ b/tests/commands/test_users.py
@@ -52,6 +52,26 @@ def test_invalid_shm(
assert result.exit_code == 1
assert "Have you deployed the SHM?" in result.stdout
+ def test_mismatched_domain(
+ self,
+ mock_graphapi_get_credential, # noqa: ARG002
+ mock_pulumi_config_no_key_from_remote, # noqa: ARG002
+ mock_shm_config_from_remote, # noqa: ARG002
+ mock_sre_config_from_remote, # noqa: ARG002
+ mock_entra_user_list, # noqa: ARG002
+ runner,
+ tmp_contexts, # noqa: ARG002
+ ):
+ result = runner.invoke(
+ users_command_group, ["register", "-u", "harry.lime", "sandbox"]
+ )
+
+ assert result.exit_code == 0
+ assert (
+ "principal domain name must match the domain of the SRE to be registered"
+ in result.stdout
+ )
+
def test_invalid_sre(
self,
mock_pulumi_config_from_remote, # noqa: ARG002