diff --git a/.github/ISSUE_TEMPLATE/deployment_bug_report.md b/.github/ISSUE_TEMPLATE/deployment_bug_report.md
index 6cf453cc13..fa569038c2 100644
--- a/.github/ISSUE_TEMPLATE/deployment_bug_report.md
+++ b/.github/ISSUE_TEMPLATE/deployment_bug_report.md
@@ -29,7 +29,6 @@ Before reporting a problem please check the following. Replace the empty checkbo
 <summary>List of packages</summary>
 
 <!--
-Powershell: Run `deployment/CheckRequirements.ps1` and paste the output below
 Python: Run `hatch run pip freeze` and paste the output below
 -->
 
diff --git a/.github/ISSUE_TEMPLATE/release_checklist.md b/.github/ISSUE_TEMPLATE/release_checklist.md
index a25064faa1..f4e887e797 100644
--- a/.github/ISSUE_TEMPLATE/release_checklist.md
+++ b/.github/ISSUE_TEMPLATE/release_checklist.md
@@ -14,9 +14,9 @@ Before reporting a problem please check the following. Replace the empty checkbo
 
 Refer to the [Deployment](https://data-safe-haven.readthedocs.io/en/latest/deployment) section of our documentation when completing these steps.
 
-- [ ] Consult the `data-safe-haven/VERSIONING.md` guide and determine the version number of the new release. Record it in the title of this issue.
+- [ ] Consult the `data-safe-haven/VERSIONING.md` guide and determine the version number of the new release. Record it in the title of this issue
 - [ ] Create a release branch called e.g. `release-v0.0.1`
-- [ ] Draft a changelog for the release similar to our previous releases, see https://github.com/alan-turing-institute/data-safe-haven/releases
+- [ ] Draft a changelog for the release similar to our [previous releases](https://github.com/alan-turing-institute/data-safe-haven/releases)
 
 ### For patch releases only
 
@@ -25,35 +25,34 @@ Refer to the [Deployment](https://data-safe-haven.readthedocs.io/en/latest/deplo
 ### For minor releases and above
 
 - [ ] Deploy an SHM from this branch and save a transcript of the deployment logs
-- Using the new image, deploy a tier 2 and a tier 3 SRE
-    - [ ] Save the transcript of your tier 2 SRE deployment
-    - [ ] Save the transcript of your tier 3 SRE deployment
+- [ ] Deploy a tier 2 SRE from this branch and save the transcript of the deployment logs
+- [ ] Deploy a tier 3 SRE from this branch and save the transcript of the deployment logs
 - [ ] Complete the [Security evaluation checklist](https://data-safe-haven.readthedocs.io/en/latest/deployment/security_checklist.html) from the deployment documentation
-- [ ] Add the new versions tag as an active build on [Read The Docs](https://readthedocs.org) (You can add as a hidden build, before release, to preview)
 
 ### For major releases only
 
 - [ ] Confirm that a third party has carried out a full penetration test evaluating:
-  1. external attack surface
-  1. ability to exfiltrate data from the system
-  1. ability to transfer data between SREs
-  1. ability to escalate privileges on the SRD.
+    1. external attack surface
+    1. ability to exfiltrate data from the system
+    1. ability to transfer data between SREs
+    1. ability to escalate privileges on the SRD.
 
 ### Update documentation
 
 - [ ] Update supported versions in `SECURITY.md`
 - [ ] Update pen test results in `VERSIONING.md`
 
-## :computer: Release information
+### Making the release
 
-- **Version number:** _
-- **SHM ID:** _
-- **T2 SRE ID:** _
-- **T3 SRE ID:** _
+- [ ] Merge release branch into `latest`
+- [ ] Create a tag of the form `v0.0.1` pointing to the most recent commit on `latest` (the merge that you just made)
+- [ ] Publish your draft GitHub release using this tag
+- [ ] Ensure docs for the latest version are built and deployed on ReadTheDocs
+- [ ] Push a build to PyPI
+- [ ] Announce release on communications channels
 
 ## :deciduous_tree: Deployment problems
 
 <!--
 Keep a record in this issue of problems and fixes implemented during the release process. Be sure to update the changelog if any new commits are added to the release branch.
 -->
-
diff --git a/.github/scripts/update_azure_data_studio.py b/.github/scripts/update_azure_data_studio.py
deleted file mode 100644
index 651e85fdfc..0000000000
--- a/.github/scripts/update_azure_data_studio.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#! /usr/bin/env python3
-from lxml import html
-import hashlib
-import requests
-
-remote_page = requests.get("https://docs.microsoft.com/en-us/sql/azure-data-studio/download-azure-data-studio", allow_redirects=True)
-root = html.fromstring(remote_page.content)
-short_link = root.xpath("//a[contains(text(), '.deb')]/@href")[0]
-
-remote_content = requests.get(short_link, allow_redirects=True)
-sha256 = hashlib.sha256(remote_content.content).hexdigest()
-version = remote_content.url.split("-")[-1].replace(".deb", "")
-remote = "/".join(remote_content.url.split("/")[:-1] + ["|DEBFILE|"])
-
-with open("deployment/secure_research_desktop/packages/deb-azuredatastudio.version", "w") as f_out:
-    f_out.write(f"hash: {sha256}\n")
-    f_out.write(f"version: {version}\n")
-    f_out.write("debfile: azuredatastudio-linux-|VERSION|.deb\n")
-    f_out.write(f"remote: {remote}\n")
diff --git a/.github/scripts/update_dbeaver_drivers.py b/.github/scripts/update_dbeaver_drivers.py
deleted file mode 100644
index 696a501858..0000000000
--- a/.github/scripts/update_dbeaver_drivers.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#! /usr/bin/env python3
-import json
-from lxml import html
-from natsort import natsorted
-import requests
-
-
-def get_latest_version(url, search_text):
-    """
-    Get latest version number of a database driver from the Maven repository.
-
-    Fetches the HTML page at the given URL, then converts it to an lxml tree.
-    Numeric strings are then extracted.
-    Note that mostly numeric strings for some drivers contain non-numeric text,
-    as different driver types exist for those drivers, even where the version number is the same.
-    The largest (latest) version number of the driver is then returned.
-
-    Parameters
-    ----------
-    url : str
-        The URL of the Maven repository containing the driver
-    search_text : str
-        Text to search for in the repository, to distinguish the driver from other files
-
-    Returns
-    -------
-    list
-        The latest available version number of the driver
-    """
-
-    remote_page = requests.get(url, allow_redirects=True)
-    root = html.fromstring(remote_page.content)
-    return natsorted([v for v in root.xpath("//a[contains(text(), '" + search_text + "')]/@href") if v != "../"])[-1].replace("/", "")
-
-
-drivers = [
-    {
-        'name': "mssql_jdbc",
-        'url': "https://repo1.maven.org/maven2/com/microsoft/sqlserver/mssql-jdbc/",
-        'search_text': "jre8/"
-    },
-    {
-        'name': "pgjdbc",
-        'url': "https://repo1.maven.org/maven2/org/postgresql/pgjdbc-versions/",
-        'search_text': "/"
-    },
-    {
-        'name': "postgresql",
-        'url': "https://repo1.maven.org/maven2/org/postgresql/postgresql/",
-        'search_text': "/"
-    },
-    {
-        'name': "postgis_geometry",
-        'url': "https://repo1.maven.org/maven2/net/postgis/postgis-geometry/",
-        'search_text': "/"
-    },
-    {
-        'name': "postgis_jdbc",
-        'url': "https://repo1.maven.org/maven2/net/postgis/postgis-jdbc/",
-        'search_text': "/"
-    },
-    {
-        'name': "waffle_jna",
-        'url': "https://repo1.maven.org/maven2/com/github/waffle/waffle-jna/",
-        'search_text': "/"
-    }
-]
-
-output = {driver['name']: get_latest_version(driver['url'], driver['search_text']) for driver in drivers}
-
-with open("deployment/secure_research_desktop/packages/dbeaver-driver-versions.json", "w") as f_out:
-    f_out.writelines(json.dumps(output, indent=4, sort_keys=True))
diff --git a/.github/scripts/update_rstudio.py b/.github/scripts/update_rstudio.py
deleted file mode 100644
index ee36a35e66..0000000000
--- a/.github/scripts/update_rstudio.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#! /usr/bin/env python3
-from lxml import html
-import hashlib
-import requests
-
-remote_page = requests.get("https://www.rstudio.com/products/rstudio/download/", allow_redirects=True)
-root = html.fromstring(remote_page.content)
-short_links = [link for link in root.xpath("//a[contains(text(), '.deb')]/@href") if "debian" not in link]
-
-for ubuntu_version in ["focal", "jammy"]:
-    short_link = [link for link in short_links if ubuntu_version in link][0]
-    remote_content = requests.get(short_link, allow_redirects=True)
-    sha256 = hashlib.sha256(remote_content.content).hexdigest()
-    version = "-".join(remote_content.url.split("/")[-1].split("-")[1:-1])
-    remote = "/".join(remote_content.url.split("/")[:-1] + ["|DEBFILE|"])
-
-    with open(f"deployment/secure_research_desktop/packages/deb-rstudio-{ubuntu_version}.version", "w") as f_out:
-        f_out.write(f"hash: {sha256}\n")
-        f_out.write(f"version: {version}\n")
-        f_out.write("debfile: rstudio-|VERSION|-amd64.deb\n")
-        f_out.write(f"remote: {remote}\n")
diff --git a/.github/security_checklist_template.md b/.github/security_checklist_template.md
deleted file mode 100644
index b963331eef..0000000000
--- a/.github/security_checklist_template.md
+++ /dev/null
@@ -1,167 +0,0 @@
-# Security checklist
-Running on SHM/SREs deployed using commit XXXXXXX
-
-## Summary
-+ :white_check_mark: N tests passed
-- :partly_sunny: N tests partially passed (see below for more details)
-- :fast_forward: N tests skipped (see below for more details)
-- :x: N tests failed (see below for more details)
-
-## Details
-Some security checks were skipped since:
-- No managed device was available
-- No access to a physical space with its own dedicated network was possible
-
-### Multifactor Authentication and Password strength
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Check that the SRE standard user cannot access the apps
-  + <details><summary>:camera: <b>Verify before adding to group:</b> Microsoft Remote Desktop: Login works but apps cannot be viewed</summary>
-    <img src=""/>
-    </details>
-  + <details><summary>:camera: <b>Verify before adding to group:</b> Guacamole: User is prompted to setup MFA</summary>
-    <img src=""/>
-    </details>
-
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x:  Check that adding the **SRE standard user** to the SRE group on the domain controller does not give them access
-  + <details><summary>:camera: <b>Verify after adding to group:</b> Microsoft Remote Desktop: Login works and apps can be viewed</summary>
-    <img src=""/>
-    </details>
-  + <details><summary>:camera: <b>Verify after adding to group:</b> Microsoft Remote Desktop: attempt to login to DSVM Main (Desktop) fails</summary>
-    <img src=""/>
-    </details>
-  + <details><summary>:camera: <b>Verify before adding to group:</b> Guacamole: User is prompted to setup MFA</summary>
-    <img src=""/>
-    </details>
-
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Check that the **SRE standard user** is able to successfully set up MFA
-  + <details><summary>:camera: <b>Verify:</b> successfully set up MFA</summary>
-    <img src=""/>
-    </details>
-
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Check that the **SRE standard user** can authenticate with MFA
-  + <details><summary>:camera: <b>Verify:</b> Guacamole: respond to the MFA prompt</summary>
-    <img src=""/>122043131-47bc8080-cddb-11eb-8578-e45ab3efaef0.png">
-    </details>
-  + <details><summary>:camera: <b>Verify:</b> Microsoft Remote Desktop: attempt to log in to DSVM Main (Desktop) and respond to the MFA prompt</summary>
-    <img src=""/>122043131-47bc8080-cddb-11eb-8578-e45ab3efaef0.png">
-    </details>
-
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Check that the **SRE standard user** can access the DSVM desktop
-  + <details><summary>:camera: <b>Verify:</b> Microsoft Remote Desktop: connect to <i>DSVM Main (Desktop)</i></summary>
-    <img src=""/>
-    </details>
-  + <details><summary>:camera: <b>Verify:</b> Guacamole: connect to <i>Desktop: Ubuntu0</i> </summary>
-    <img src=""/>
-    </details>
-
-### Isolated Network
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Connect to the SHM DC and NPS if connected to the SHM VPN
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Fail to connect to the SHM DC and NPS if not connected to the SHM VPN
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Fail to connect to the internet from within a DSVM on the SRE network.
-  + <details><summary>:camera: <b>Verify:</b> Connection fails</summary>
-    <img src=""/>122045859-8142bb00-cdde-11eb-920c-3a162a180647.png">
-    </details>
-  + <details><summary>:camera: <b>Verify:</b> that you cannot access a website using curl</summary>
-    <img src=""/>
-    </details>
-  + <details><summary>:camera: <b>Verify:</b> that you cannot get the IP address for a website using nslookup</summary>
-    <img src=""/>
-    </details>
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Check that users cannot connect between two SREs within the same SHM, even if they have access to both SREs
-  + <details><summary>:camera: <b>Verify:</b> SSH connection fails</summary>
-    <img src=""/>
-    </details>
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Network rules are set appropriately to block outgoing traffic
-  + <details><summary>:camera: <b>Verify:</b> access rules</summary>
-    <img src=""/>
-    </details>
-
-### User devices
-#### Tier 2:
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Connection succeeds from a personal device with an allow-listed IP address
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: No managed device available to check connection
-
-#### Tier 3:
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: No managed device available to check user lacks root access
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Connection succeeds from a personal device with an allow-listed IP address
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: No managed device available to check connection with an allow-listed IP address
-
-#### Tiers 2+:
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Network rules permit access only from allow-listed IP addresses
-  + <details><summary>:camera: <b>Verify:</b> access rules</summary>
-    <img src=""/>
-    </details>
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: All non-deployment NSGs have rules denying inbound connections from outside the Virtual Network
-
-### Physical security
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: No secure physical space available so connection from outside was not tested
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: No secure physical space available so connection from inside was not tested
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Check the network IP ranges corresponding to the research spaces and compare against the IPs accepted by the firewall.
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: No secure physical space available so confirmation of physical measures was not tested
-
-### Remote connections
-
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Unable to connect as a user to the remote desktop server via SSH
-  + <details><summary>:camera: <b>Verify:</b> SSH connection by FQDN fails</summary>
-    <img src=""/>
-    </details>
-  + <details><summary>:camera: <b>Verify:</b> SSH connection by public IP address fails</summary>
-    <img src=""/>
-    </details>
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: The remote desktop server is the only SRE resource with a public IP address
-
-### Copy-and-paste
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Unable to paste local text into a DSVM
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Unable to copy text from a DSVM
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Copy between VMs in an SRE succeeds
-
-### Data ingress
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **System administrator:** secure upload token successfully created with write-only permissions
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **System administrator:** token was sent using a secure, out-of-band communication channel (e.g. secure email)
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **Data Provider:** uploading a file from an allow-listed IP address succeeds
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **Data Provider:** downloading a file from an allow-listed IP address fails
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **Data Provider:** uploading a file from a non-allowed IP address fails
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **Data Provider:** connection during lifetime of short-duration token succeeds
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **Data Provider:** connection after lifetime of short-duration token fails
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **Data Provider:** uploading different file types succeeds
-
-### Storage volumes and egress
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **SRE standard user** can read and write to the `/output` volume
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **SRE standard user** can only read from the `/data` volume
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **SRE standard user** can read and write to their directory in `/home`
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **SRE standard user** can read and write to the `/shared` volume
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **System administrator:** can see the files ready for egress
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **System administrator:** can download egress-ready files
-
-### Software Ingress
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **SRE standard user** expected software tools are installed
-  + <details><summary>:camera: <b>Verify:</b> DBeaver, RStudio, PyCharm and Visual Studio Code available</summary>
-    <img src=""/>122056611-0a132400-cdea-11eb-9087-385ab296189e.png">
-    </details>
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **System administrator:** secure upload token successfully created with write-only permissions
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **Data Provider:** uploading is possible only during the token lifetime
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **System administrator:** uploaded files are readable and can be installed on the DSVM
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **SRE standard user** uploaded files are readable but cannot be installed on the DSVM
-
-### Package mirrors
-
-#### Tier 2:
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Can install any packages
-  + <details><summary>:camera: <b>Verify:</b> botocore can be installed</summary>
-    <img src=""/>
-    </details>
-
-#### Tier 3:
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Can install only allow-listed packages
-  + <details><summary>:camera: <b>Verify:</b> aero-calc can be installed; botocore cannot be installed</summary>
-    <img src=""/>
-    </details>
-
-### Azure firewalls
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Admin has limited access to the internet
-  + <details><summary>:camera: <b>Verify:</b> SHM DC cannot connect to google</summary>
-    <img src=""/>122067607-ff5d8c80-cdf3-11eb-8e20-a401faba0be4.png">
-    </details>
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Admin can download Windows updates
-  + <details><summary>:camera: <b>Verify:</b> Windows updates can be downloaded</summary>
-    <img src=""/>122067641-071d3100-cdf4-11eb-9dc8-03938ff49e3a.png">
-    </details>
diff --git a/.github/workflows/lint_code.yaml b/.github/workflows/lint_code.yaml
index 7786fc4b62..4d0caed16c 100644
--- a/.github/workflows/lint_code.yaml
+++ b/.github/workflows/lint_code.yaml
@@ -108,7 +108,7 @@ jobs:
           done
           rm expanded.tmp
       - name: Lint YAML
-        uses: karancode/yamllint-github-action@v2.1.1
+        uses: karancode/yamllint-github-action@v3.0.0
         with:
           yamllint_strict: true
           yamllint_comment: false
diff --git a/.hatch/requirements-docs.txt b/.hatch/requirements-docs.txt
index 95fcbdfd41..cebd1e3c16 100644
--- a/.hatch/requirements-docs.txt
+++ b/.hatch/requirements-docs.txt
@@ -91,7 +91,7 @@ typing-extensions==4.12.2
     # via pydata-sphinx-theme
 urllib3==2.2.3
     # via requests
-wheel==0.45.0
+wheel==0.45.1
     # via sphinx-togglebutton
 
 # The following packages are considered to be unsafe in a requirements file:
diff --git a/.hatch/requirements-lint.txt b/.hatch/requirements-lint.txt
index 295c694a65..c09dc3ebb9 100644
--- a/.hatch/requirements-lint.txt
+++ b/.hatch/requirements-lint.txt
@@ -1,13 +1,13 @@
 #
 # This file is autogenerated by hatch-pip-compile with Python 3.12
 #
-# - ansible-dev-tools==24.10.2
-# - ansible==10.6.0
+# - ansible-dev-tools==24.11.0
+# - ansible==11.0.0
 # - black==24.10.0
 # - mypy==1.13.0
-# - pandas-stubs==2.2.3.241009
-# - pydantic==2.9.2
-# - ruff==0.7.4
+# - pandas-stubs==2.2.3.241126
+# - pydantic==2.10.2
+# - ruff==0.8.1
 # - types-appdirs==1.4.3.5
 # - types-chevron==0.14.2.20240310
 # - types-pytz==2024.2.0.20241003
@@ -17,7 +17,7 @@
 
 annotated-types==0.7.0
     # via pydantic
-ansible==10.6.0
+ansible==11.0.0
     # via hatch.envs.lint
 ansible-builder==3.1.0
     # via
@@ -29,7 +29,7 @@ ansible-compat==24.10.0
     #   ansible-lint
     #   molecule
     #   pytest-ansible
-ansible-core==2.17.6
+ansible-core==2.18.0
     # via
     #   ansible
     #   ansible-compat
@@ -40,7 +40,7 @@ ansible-creator==24.11.0
     # via ansible-dev-tools
 ansible-dev-environment==24.9.0
     # via ansible-dev-tools
-ansible-dev-tools==24.10.2
+ansible-dev-tools==24.11.0
     # via hatch.envs.lint
 ansible-lint==24.10.0
     # via
@@ -81,7 +81,7 @@ click-help-colors==0.9.4
     # via molecule
 colorama==0.4.6
     # via tox
-cryptography==43.0.3
+cryptography==44.0.0
     # via ansible-core
 distlib==0.3.9
     # via
@@ -151,7 +151,7 @@ packaging==24.2
     #   pytest
     #   pytest-ansible
     #   tox
-pandas-stubs==2.2.3.241009
+pandas-stubs==2.2.3.241126
     # via hatch.envs.lint
 parsley==1.3
     # via bindep
@@ -178,15 +178,15 @@ ptyprocess==0.7.0
     # via pexpect
 pycparser==2.22
     # via cffi
-pydantic==2.9.2
+pydantic==2.10.2
     # via hatch.envs.lint
-pydantic-core==2.23.4
+pydantic-core==2.27.1
     # via pydantic
 pygments==2.18.0
     # via rich
 pyproject-api==1.8.0
     # via tox
-pytest==8.3.3
+pytest==8.3.4
     # via
     #   pytest-ansible
     #   pytest-xdist
@@ -197,7 +197,7 @@ pytest-ansible==24.9.0
     #   tox-ansible
 pytest-xdist==3.6.1
     # via tox-ansible
-python-daemon==3.1.0
+python-daemon==3.1.1
     # via ansible-runner
 python-gnupg==0.5.3
     # via ansible-sign
@@ -233,7 +233,7 @@ ruamel-yaml==0.18.6
     # via ansible-lint
 ruamel-yaml-clib==0.2.12
     # via ruamel-yaml
-ruff==0.7.4
+ruff==0.8.1
     # via hatch.envs.lint
 subprocess-tee==0.4.2
     # via
@@ -265,7 +265,7 @@ tzdata==2024.2
     # via ansible-navigator
 urllib3==2.2.3
     # via types-requests
-virtualenv==20.27.1
+virtualenv==20.28.0
     # via tox
 wcmatch==10.0
     # via
diff --git a/.hatch/requirements-test.txt b/.hatch/requirements-test.txt
index 643331837b..ce24e1caee 100644
--- a/.hatch/requirements-test.txt
+++ b/.hatch/requirements-test.txt
@@ -1,7 +1,7 @@
 #
 # This file is autogenerated by hatch-pip-compile with Python 3.12
 #
-# [constraints] .hatch/requirements.txt (SHA256: ca6dfe8295dd8d2e6e4ade0fce58d158854ce5df89be8d092b36c34fe2679f3f)
+# [constraints] .hatch/requirements.txt (SHA256: 9b78097f41c11566a80e32726aefa74a983ac227fce27db9adba04ae7594da1c)
 #
 # - appdirs==1.4.4
 # - azure-core==1.32.0
@@ -21,25 +21,25 @@
 # - azure-storage-file-datalake==12.18.0
 # - azure-storage-file-share==12.20.0
 # - chevron==0.14.0
-# - cryptography==43.0.3
+# - cryptography==44.0.0
 # - fqdn==1.5.1
 # - psycopg[binary]==3.1.19
-# - pulumi-azure-native==2.72.0
+# - pulumi-azure-native==2.74.0
 # - pulumi-azuread==6.0.1
 # - pulumi-random==4.16.7
-# - pulumi==3.139.0
-# - pydantic==2.9.2
-# - pyjwt[crypto]==2.10.0
+# - pulumi==3.142.0
+# - pydantic==2.10.2
+# - pyjwt[crypto]==2.10.1
 # - pytz==2024.2
 # - pyyaml==6.0.2
 # - rich==13.9.4
 # - simple-acme-dns==3.2.0
-# - typer==0.13.0
+# - typer==0.14.0
 # - websocket-client==1.8.0
-# - coverage==7.6.7
+# - coverage==7.6.8
 # - freezegun==1.5.1
 # - pytest-mock==3.14.0
-# - pytest==8.3.3
+# - pytest==8.3.4
 # - requests-mock==1.12.1
 #
 
@@ -180,9 +180,9 @@ click==8.1.7
     # via
     #   -c .hatch/requirements.txt
     #   typer
-coverage==7.6.7
+coverage==7.6.8
     # via hatch.envs.test
-cryptography==43.0.3
+cryptography==44.0.0
     # via
     #   -c .hatch/requirements.txt
     #   hatch.envs.test
@@ -195,7 +195,7 @@ cryptography==43.0.3
     #   msal
     #   pyjwt
     #   pyopenssl
-debugpy==1.8.8
+debugpy==1.8.9
     # via
     #   -c .hatch/requirements.txt
     #   pulumi
@@ -251,7 +251,7 @@ mdurl==0.1.2
     # via
     #   -c .hatch/requirements.txt
     #   markdown-it-py
-msal==1.31.0
+msal==1.31.1
     # via
     #   -c .hatch/requirements.txt
     #   azure-identity
@@ -295,14 +295,14 @@ psycopg-binary==3.1.19
     # via
     #   -c .hatch/requirements.txt
     #   psycopg
-pulumi==3.139.0
+pulumi==3.142.0
     # via
     #   -c .hatch/requirements.txt
     #   hatch.envs.test
     #   pulumi-azure-native
     #   pulumi-azuread
     #   pulumi-random
-pulumi-azure-native==2.72.0
+pulumi-azure-native==2.74.0
     # via
     #   -c .hatch/requirements.txt
     #   hatch.envs.test
@@ -318,11 +318,11 @@ pycparser==2.22
     # via
     #   -c .hatch/requirements.txt
     #   cffi
-pydantic==2.9.2
+pydantic==2.10.2
     # via
     #   -c .hatch/requirements.txt
     #   hatch.envs.test
-pydantic-core==2.23.4
+pydantic-core==2.27.1
     # via
     #   -c .hatch/requirements.txt
     #   pydantic
@@ -330,12 +330,12 @@ pygments==2.18.0
     # via
     #   -c .hatch/requirements.txt
     #   rich
-pyjwt==2.10.0
+pyjwt==2.10.1
     # via
     #   -c .hatch/requirements.txt
     #   hatch.envs.test
     #   msal
-pyopenssl==24.2.1
+pyopenssl==24.3.0
     # via
     #   -c .hatch/requirements.txt
     #   acme
@@ -344,7 +344,7 @@ pyrfc3339==2.0.1
     # via
     #   -c .hatch/requirements.txt
     #   acme
-pytest==8.3.3
+pytest==8.3.4
     # via
     #   hatch.envs.test
     #   pytest-mock
@@ -403,7 +403,7 @@ six==1.16.0
     #   azure-core
     #   pulumi
     #   python-dateutil
-typer==0.13.0
+typer==0.14.0
     # via
     #   -c .hatch/requirements.txt
     #   hatch.envs.test
diff --git a/.hatch/requirements.txt b/.hatch/requirements.txt
index b0f7aff926..f2589f1f68 100644
--- a/.hatch/requirements.txt
+++ b/.hatch/requirements.txt
@@ -19,20 +19,20 @@
 # - azure-storage-file-datalake==12.18.0
 # - azure-storage-file-share==12.20.0
 # - chevron==0.14.0
-# - cryptography==43.0.3
+# - cryptography==44.0.0
 # - fqdn==1.5.1
 # - psycopg[binary]==3.1.19
-# - pulumi-azure-native==2.72.0
+# - pulumi-azure-native==2.74.0
 # - pulumi-azuread==6.0.1
 # - pulumi-random==4.16.7
-# - pulumi==3.139.0
-# - pydantic==2.9.2
-# - pyjwt[crypto]==2.10.0
+# - pulumi==3.142.0
+# - pydantic==2.10.2
+# - pyjwt[crypto]==2.10.1
 # - pytz==2024.2
 # - pyyaml==6.0.2
 # - rich==13.9.4
 # - simple-acme-dns==3.2.0
-# - typer==0.13.0
+# - typer==0.14.0
 # - websocket-client==1.8.0
 #
 
@@ -122,7 +122,7 @@ chevron==0.14.0
     # via hatch.envs.default
 click==8.1.7
     # via typer
-cryptography==43.0.3
+cryptography==44.0.0
     # via
     #   hatch.envs.default
     #   acme
@@ -134,7 +134,7 @@ cryptography==43.0.3
     #   msal
     #   pyjwt
     #   pyopenssl
-debugpy==1.8.8
+debugpy==1.8.9
     # via pulumi
 dill==0.3.9
     # via pulumi
@@ -167,7 +167,7 @@ markdown-it-py==3.0.0
     # via rich
 mdurl==0.1.2
     # via markdown-it-py
-msal==1.31.0
+msal==1.31.1
     # via
     #   azure-identity
     #   msal-extensions
@@ -192,13 +192,13 @@ psycopg==3.1.19
     # via hatch.envs.default
 psycopg-binary==3.1.19
     # via psycopg
-pulumi==3.139.0
+pulumi==3.142.0
     # via
     #   hatch.envs.default
     #   pulumi-azure-native
     #   pulumi-azuread
     #   pulumi-random
-pulumi-azure-native==2.72.0
+pulumi-azure-native==2.74.0
     # via hatch.envs.default
 pulumi-azuread==6.0.1
     # via hatch.envs.default
@@ -206,17 +206,17 @@ pulumi-random==4.16.7
     # via hatch.envs.default
 pycparser==2.22
     # via cffi
-pydantic==2.9.2
+pydantic==2.10.2
     # via hatch.envs.default
-pydantic-core==2.23.4
+pydantic-core==2.27.1
     # via pydantic
 pygments==2.18.0
     # via rich
-pyjwt==2.10.0
+pyjwt==2.10.1
     # via
     #   hatch.envs.default
     #   msal
-pyopenssl==24.2.1
+pyopenssl==24.3.0
     # via
     #   acme
     #   josepy
@@ -257,7 +257,7 @@ six==1.16.0
     # via
     #   azure-core
     #   pulumi
-typer==0.13.0
+typer==0.14.0
     # via hatch.envs.default
 typing-extensions==4.12.2
     # via
diff --git a/.mdlstyle.rb b/.mdlstyle.rb
index 7ca3c2af8b..80b6e14d8c 100644
--- a/.mdlstyle.rb
+++ b/.mdlstyle.rb
@@ -6,7 +6,7 @@
 exclude_rule 'MD013'
 exclude_rule 'MD024'
 rule 'MD026', :punctuation => ".,;"
-rule 'MD029', :style => :ordered
+rule 'MD029', :style => :one
 exclude_rule 'MD033'
 exclude_rule 'MD034'
 exclude_rule 'MD041' # this conflicts with MyST target anchors
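
For context, mdl's MD029 rule with `:style => :one` expects every item in an ordered list to use the literal `1.` prefix, leaving the numbering to the renderer. This matches the renumbered penetration-test list in the release checklist above, roughly:

```markdown
1. external attack surface
1. ability to exfiltrate data from the system
1. ability to transfer data between SREs
```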
diff --git a/SECURITY.md b/SECURITY.md
index c81368a94e..9aee903593 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -7,8 +7,8 @@ All organisations using an earlier version in production should update to the la
 
 | Version                                                                                 | Supported          |
 | --------------------------------------------------------------------------------------- | ------------------ |
-| [5.1.0](https://github.com/alan-turing-institute/data-safe-haven/releases/tag/v5.1.0)   | :white_check_mark: |
-| < 5.1.0                                                                                 | :x:                |
+| [5.2.0](https://github.com/alan-turing-institute/data-safe-haven/releases/tag/v5.2.0)   | :white_check_mark: |
+| < 5.2.0                                                                                 | :x:                |
 
 ## Reporting a Vulnerability
 
diff --git a/data_safe_haven/commands/sre.py b/data_safe_haven/commands/sre.py
index 8c3e0b5cdc..14330c2cff 100644
--- a/data_safe_haven/commands/sre.py
+++ b/data_safe_haven/commands/sre.py
@@ -96,6 +96,7 @@ def deploy(
         )
         # Set Entra options
         application = graph_api.get_application_by_name(context.entra_application_name)
+
         if not application:
             msg = f"No Entra application '{context.entra_application_name}' was found. Please redeploy your SHM."
             raise DataSafeHavenConfigError(msg)
@@ -162,6 +163,13 @@ def deploy(
             timezone=sre_config.sre.timezone,
         )
         manager.run()
+
+        console.print(
+            f"Secure Research Environment '[green]{name}[/]' has been successfully deployed.",
+            f"The SRE can be accessed at [green]https://{stack.output('sre_fqdn')}[/]",
+            sep="\n",
+        )
+
     except DataSafeHavenError as exc:
         logger.critical(
             f"Could not deploy Secure Research Environment '[green]{name}[/]'."
diff --git a/data_safe_haven/commands/users.py b/data_safe_haven/commands/users.py
index fe413fa781..8c8b232ceb 100644
--- a/data_safe_haven/commands/users.py
+++ b/data_safe_haven/commands/users.py
@@ -5,6 +5,7 @@
 
 import typer
 
+from data_safe_haven import console
 from data_safe_haven.administration.users import UserHandler
 from data_safe_haven.config import ContextManager, DSHPulumiConfig, SHMConfig, SREConfig
 from data_safe_haven.exceptions import DataSafeHavenError
@@ -120,9 +121,9 @@ def register(
         # Load SHMConfig
         try:
             shm_config = SHMConfig.from_remote(context)
-        except DataSafeHavenError:
+        except DataSafeHavenError as exc:
             logger.error("Have you deployed the SHM?")
-            raise
+            raise typer.Exit(1) from exc
 
         # Load Pulumi config
         pulumi_config = DSHPulumiConfig.from_remote(context)
@@ -132,7 +133,7 @@ def register(
         if sre_config.name not in pulumi_config.project_names:
             msg = f"Could not load Pulumi settings for '{sre_config.name}'. Have you deployed the SRE?"
             logger.error(msg)
-            raise DataSafeHavenError(msg)
+            raise typer.Exit(1)
 
         # Load GraphAPI
         graph_api = GraphApi.from_scopes(
@@ -146,16 +147,29 @@ def register(
 
         # List users
         users = UserHandler(context, graph_api)
-        available_usernames = users.get_usernames_entra_id()
+        available_users = users.entra_users.list()
+        user_dict = {
+            user.preferred_username.split("@")[0]: user.preferred_username.split("@")[1]
+            for user in available_users
+        }
         usernames_to_register = []
         for username in usernames:
-            if username in available_usernames:
-                usernames_to_register.append(username)
+            if user_domain := user_dict.get(username):
+                if shm_config.shm.fqdn not in user_domain:
+                    console.print(
+                        f"User [green]'{username}[/green]'s principal domain name is [blue]'{user_domain}'[/blue].\n"
+                        f"SRE [yellow]'{sre}'[/yellow] belongs to SHM domain [blue]'{shm_config.shm.fqdn}'[/blue]."
+                    )
+                    logger.error(
+                        "The user's principal domain name must match the domain of the SRE to be registered."
+                    )
+                else:
+                    usernames_to_register.append(username)
             else:
                 logger.error(
                     f"Username '{username}' does not belong to this Data Safe Haven deployment."
-                    " Please use 'dsh users add' to create it."
                 )
+                console.print("Please use 'dsh users add' to create this user.")
         users.register(sre_config.name, usernames_to_register)
     except DataSafeHavenError as exc:
         logger.critical(f"Could not register Data Safe Haven users with SRE '{sre}'.")
@@ -259,8 +273,8 @@ def unregister(
             else:
                 logger.error(
                     f"Username '{username}' does not belong to this Data Safe Haven deployment."
-                    " Please use 'dsh users add' to create it."
                 )
+                console.print("Please use 'dsh users add' to create it.")
         for group_name in (
             f"{sre_config.name} Users",
             f"{sre_config.name} Privileged Users",
diff --git a/data_safe_haven/console/__init__.py b/data_safe_haven/console/__init__.py
index 133a48fc12..f30bda2882 100644
--- a/data_safe_haven/console/__init__.py
+++ b/data_safe_haven/console/__init__.py
@@ -1,5 +1,5 @@
 from .format import tabulate
-from .pretty import pretty_print as print  # noqa: A001
+from .pretty import pretty_print as print  # noqa: A004
 from .prompts import confirm
 
 __all__ = [
diff --git a/data_safe_haven/exceptions/__init__.py b/data_safe_haven/exceptions/__init__.py
index b22d70e693..a858cfaf6d 100644
--- a/data_safe_haven/exceptions/__init__.py
+++ b/data_safe_haven/exceptions/__init__.py
@@ -28,6 +28,16 @@ class DataSafeHavenAzureError(DataSafeHavenError):
     pass
 
 
+class DataSafeHavenCachedCredentialError(DataSafeHavenError):
+    """
+    Exception class for handling errors related to cached credentials.
+
+    Raise this error when a cached credential is not the credential a user wants to use.
+    """
+
+    pass
+
+
 class DataSafeHavenAzureStorageError(DataSafeHavenAzureError):
     """
     Exception class for handling errors when interacting with Azure Storage.
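
Because the new class derives from `DataSafeHavenError`, callers can handle the cached-credential case specifically while existing broad handlers keep working. An illustrative, standalone sketch (re-declarations for demonstration, not the real module):

```python
class DataSafeHavenError(Exception):
    pass

class DataSafeHavenCachedCredentialError(DataSafeHavenError):
    pass

def load_credential(confirmed: bool) -> str:
    if not confirmed:
        msg = "Selected credentials are incorrect."
        raise DataSafeHavenCachedCredentialError(msg)
    return "credential"

try:
    load_credential(confirmed=False)
except DataSafeHavenCachedCredentialError:
    # Specific remedy: clear the cached credential and retry.
    print("Cached credential rejected; delete it and re-authenticate.")
except DataSafeHavenError:
    # Everything else still lands in the generic handler.
    print("Some other Data Safe Haven error.")
```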
diff --git a/data_safe_haven/external/__init__.py b/data_safe_haven/external/__init__.py
index 5e46325958..d26ef75058 100644
--- a/data_safe_haven/external/__init__.py
+++ b/data_safe_haven/external/__init__.py
@@ -6,10 +6,10 @@
 from .interface.pulumi_account import PulumiAccount
 
 __all__ = [
-    "AzureSdk",
     "AzureContainerInstance",
     "AzureIPv4Range",
     "AzurePostgreSQLDatabase",
+    "AzureSdk",
     "GraphApi",
     "PulumiAccount",
 ]
diff --git a/data_safe_haven/external/api/credentials.py b/data_safe_haven/external/api/credentials.py
index bfeb9c3aeb..82e444cea5 100644
--- a/data_safe_haven/external/api/credentials.py
+++ b/data_safe_haven/external/api/credentials.py
@@ -18,7 +18,11 @@
 
 from data_safe_haven import console
 from data_safe_haven.directories import config_dir
-from data_safe_haven.exceptions import DataSafeHavenAzureError, DataSafeHavenValueError
+from data_safe_haven.exceptions import (
+    DataSafeHavenAzureError,
+    DataSafeHavenCachedCredentialError,
+    DataSafeHavenValueError,
+)
 from data_safe_haven.logging import get_logger
 from data_safe_haven.types import AzureSdkCredentialScope
 
@@ -28,6 +32,7 @@ class DeferredCredential(TokenCredential):
 
     tokens_: ClassVar[dict[str, AccessToken]] = {}
     cache_: ClassVar[set[tuple[str, str]]] = set()
+    name: ClassVar[str] = "Credential name"
 
     def __init__(
         self,
@@ -66,32 +71,28 @@ def get_credential(self) -> TokenCredential:
 
     def confirm_credentials_interactive(
         self,
-        target_name: str,
         user_name: str,
         user_id: str,
         tenant_name: str,
         tenant_id: str,
-    ) -> None:
+    ) -> bool:
         """
         Allow user to confirm that credentials are correct.
 
         Responses are cached so the user will only be prompted once per run.
         If 'skip_confirmation' is set, then no confirmation will be performed.
-
-        Raises:
-            DataSafeHavenValueError: if the user indicates that the credentials are wrong
         """
         if self.skip_confirmation:
-            return
+            return True
         if (user_id, tenant_id) in DeferredCredential.cache_:
-            return
+            return True
+
         DeferredCredential.cache_.add((user_id, tenant_id))
-        self.logger.info(f"You are logged into the [blue]{target_name}[/] as:")
+        self.logger.info(f"You are logged into the [blue]{self.name}[/] as:")
         self.logger.info(f"\tuser: [green]{user_name}[/] ({user_id})")
         self.logger.info(f"\ttenant: [green]{tenant_name}[/] ({tenant_id})")
-        if not console.confirm("Are these details correct?", default_to_yes=True):
-            msg = "Selected credentials are incorrect."
-            raise DataSafeHavenValueError(msg)
+
+        return console.confirm("Are these details correct?", default_to_yes=True)
 
     def get_token(
         self,
@@ -119,6 +120,8 @@ class AzureSdkCredential(DeferredCredential):
     Uses AzureCliCredential for authentication
     """
 
+    name: ClassVar[str] = "Azure CLI"
+
     def __init__(
         self,
         scope: AzureSdkCredentialScope = AzureSdkCredentialScope.DEFAULT,
@@ -133,19 +136,22 @@ def get_credential(self) -> TokenCredential:
         # Confirm that these are the desired credentials
         try:
             decoded = self.decode_token(credential.get_token(*self.scopes).token)
-            self.confirm_credentials_interactive(
-                "Azure CLI",
-                user_name=decoded["name"],
-                user_id=decoded["oid"],
-                tenant_name=decoded["upn"].split("@")[1],
-                tenant_id=decoded["tid"],
-            )
         except (CredentialUnavailableError, DataSafeHavenValueError) as exc:
+            msg = "Error getting account information from Azure CLI."
+            raise DataSafeHavenAzureError(msg) from exc
+
+        if not self.confirm_credentials_interactive(
+            user_name=decoded["name"],
+            user_id=decoded["oid"],
+            tenant_name=decoded["upn"].split("@")[1],
+            tenant_id=decoded["tid"],
+        ):
             self.logger.error(
                 "Please authenticate with Azure: run '[green]az login[/]' using [bold]infrastructure administrator[/] credentials."
             )
-            msg = "Error getting account information from Azure CLI."
-            raise DataSafeHavenAzureError(msg) from exc
+            msg = "Selected credentials are incorrect."
+            raise DataSafeHavenCachedCredentialError(msg)
+
         return credential
 
 
@@ -156,6 +162,8 @@ class GraphApiCredential(DeferredCredential):
     Uses DeviceCodeCredential for authentication
     """
 
+    name: ClassVar[str] = "Microsoft Graph API"
+
     def __init__(
         self,
         tenant_id: str,
@@ -214,13 +222,17 @@ def callback(verification_uri: str, user_code: str, _: datetime) -> None:
             raise DataSafeHavenAzureError(msg) from exc
 
         # Confirm that these are the desired credentials
-        self.confirm_credentials_interactive(
-            "Microsoft Graph API",
+        if not self.confirm_credentials_interactive(
             user_name=new_auth_record.username,
             user_id=new_auth_record._home_account_id.split(".")[0],
             tenant_name=new_auth_record._username.split("@")[1],
             tenant_id=new_auth_record._tenant_id,
-        )
+        ):
+            self.logger.error(
+                f"Delete the cached credential file [green]{authentication_record_path}[/] and rerun dsh to authenticate with {self.name}"
+            )
+            msg = "Selected credentials are incorrect."
+            raise DataSafeHavenCachedCredentialError(msg)
 
         # Return the credential
         return credential
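
The `target_name` argument is gone: each credential class now carries a `name` ClassVar that the shared confirmation prompt reads. A minimal sketch of that pattern (simplified, not the real classes):

```python
from typing import ClassVar

class DeferredCredential:
    name: ClassVar[str] = "Credential name"

    def announce(self) -> str:
        # Subclasses override `name`, so the shared prompt stays generic.
        return f"You are logged into the {self.name} as:"

class AzureSdkCredential(DeferredCredential):
    name: ClassVar[str] = "Azure CLI"

class GraphApiCredential(DeferredCredential):
    name: ClassVar[str] = "Microsoft Graph API"

print(AzureSdkCredential().announce())   # You are logged into the Azure CLI as:
print(GraphApiCredential().announce())   # You are logged into the Microsoft Graph API as:
```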
diff --git a/data_safe_haven/external/api/graph_api.py b/data_safe_haven/external/api/graph_api.py
index 7d3b088672..d77e78120d 100644
--- a/data_safe_haven/external/api/graph_api.py
+++ b/data_safe_haven/external/api/graph_api.py
@@ -837,7 +837,10 @@ def read_applications(self) -> Sequence[dict[str, Any]]:
                     "value"
                 ]
             ]
-        except Exception as exc:
+        except (
+            DataSafeHavenMicrosoftGraphError,
+            requests.JSONDecodeError,
+        ) as exc:
             msg = "Could not load list of applications."
             raise DataSafeHavenMicrosoftGraphError(msg) from exc
 
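
Narrowing the bare `except Exception` to the two anticipated failure modes means genuine bugs (for example a `KeyError` from an unexpected payload shape) now propagate instead of being re-labelled as Graph errors. A small illustration using the stdlib `json` module (hypothetical function, not the real client):

```python
import json

def read_names(payload: str) -> list[str]:
    """Parse a JSON payload, catching only the anticipated failure."""
    try:
        return [item["name"] for item in json.loads(payload)["value"]]
    except json.JSONDecodeError as exc:
        msg = "Could not load list of applications."
        raise RuntimeError(msg) from exc

print(read_names('{"value": [{"name": "app-1"}]}'))  # ['app-1']
# read_names("not json") raises RuntimeError with the JSON error chained;
# a payload missing "value" now raises KeyError, exposing the real bug.
```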
diff --git a/data_safe_haven/infrastructure/common/__init__.py b/data_safe_haven/infrastructure/common/__init__.py
index 6106cac731..85184d6574 100644
--- a/data_safe_haven/infrastructure/common/__init__.py
+++ b/data_safe_haven/infrastructure/common/__init__.py
@@ -16,6 +16,8 @@
 
 __all__ = [
     "DockerHubCredentials",
+    "SREDnsIpRanges",
+    "SREIpRanges",
     "get_address_prefixes_from_subnet",
     "get_available_ips_from_subnet",
     "get_id_from_rg",
@@ -27,6 +29,4 @@
     "get_name_from_subnet",
     "get_name_from_vnet",
     "get_subscription_id_from_rg",
-    "SREDnsIpRanges",
-    "SREIpRanges",
 ]
diff --git a/data_safe_haven/infrastructure/components/__init__.py b/data_safe_haven/infrastructure/components/__init__.py
index 2b3dd67e7a..52043d1ad3 100644
--- a/data_safe_haven/infrastructure/components/__init__.py
+++ b/data_safe_haven/infrastructure/components/__init__.py
@@ -9,6 +9,8 @@
     MicrosoftSQLDatabaseProps,
     NFSV3BlobContainerComponent,
     NFSV3BlobContainerProps,
+    NFSV3StorageAccountComponent,
+    NFSV3StorageAccountProps,
     PostgresqlDatabaseComponent,
     PostgresqlDatabaseProps,
     VMComponent,
@@ -23,7 +25,6 @@
 )
 from .wrapped import (
     WrappedLogAnalyticsWorkspace,
-    WrappedNFSV3StorageAccount,
 )
 
 __all__ = [
@@ -41,7 +42,8 @@
     "MicrosoftSQLDatabaseProps",
     "NFSV3BlobContainerComponent",
     "NFSV3BlobContainerProps",
-    "WrappedNFSV3StorageAccount",
+    "NFSV3StorageAccountComponent",
+    "NFSV3StorageAccountProps",
     "PostgresqlDatabaseComponent",
     "PostgresqlDatabaseProps",
     "SSLCertificate",
diff --git a/data_safe_haven/infrastructure/components/composite/__init__.py b/data_safe_haven/infrastructure/components/composite/__init__.py
index bc09bc18a8..8e561dd73a 100644
--- a/data_safe_haven/infrastructure/components/composite/__init__.py
+++ b/data_safe_haven/infrastructure/components/composite/__init__.py
@@ -9,6 +9,10 @@
     MicrosoftSQLDatabaseProps,
 )
 from .nfsv3_blob_container import NFSV3BlobContainerComponent, NFSV3BlobContainerProps
+from .nfsv3_storage_account import (
+    NFSV3StorageAccountComponent,
+    NFSV3StorageAccountProps,
+)
 from .postgresql_database import PostgresqlDatabaseComponent, PostgresqlDatabaseProps
 from .virtual_machine import LinuxVMComponentProps, VMComponent
 
@@ -23,6 +27,8 @@
     "MicrosoftSQLDatabaseProps",
     "NFSV3BlobContainerComponent",
     "NFSV3BlobContainerProps",
+    "NFSV3StorageAccountComponent",
+    "NFSV3StorageAccountProps",
     "PostgresqlDatabaseComponent",
     "PostgresqlDatabaseProps",
     "VMComponent",
diff --git a/data_safe_haven/infrastructure/components/composite/nfsv3_blob_container.py b/data_safe_haven/infrastructure/components/composite/nfsv3_blob_container.py
index 98564918a0..29550e9541 100644
--- a/data_safe_haven/infrastructure/components/composite/nfsv3_blob_container.py
+++ b/data_safe_haven/infrastructure/components/composite/nfsv3_blob_container.py
@@ -1,7 +1,7 @@
 from pulumi import ComponentResource, Input, ResourceOptions
 from pulumi_azure_native import storage
 
-from data_safe_haven.infrastructure.components.dynamic.blob_container_acl import (
+from data_safe_haven.infrastructure.components.dynamic import (
     BlobContainerAcl,
     BlobContainerAclProps,
 )
@@ -52,6 +52,7 @@ def __init__(
                 ResourceOptions(parent=props.storage_account),
             ),
         )
+
         BlobContainerAcl(
             f"{storage_container._name}_acl",
             BlobContainerAclProps(
diff --git a/data_safe_haven/infrastructure/components/composite/nfsv3_storage_account.py b/data_safe_haven/infrastructure/components/composite/nfsv3_storage_account.py
new file mode 100644
index 0000000000..ca003bbd3d
--- /dev/null
+++ b/data_safe_haven/infrastructure/components/composite/nfsv3_storage_account.py
@@ -0,0 +1,144 @@
+from collections.abc import Mapping, Sequence
+
+from pulumi import ComponentResource, Input, Output, ResourceOptions
+from pulumi_azure_native import insights, storage
+
+from data_safe_haven.external import AzureIPv4Range
+from data_safe_haven.infrastructure.components.wrapped import (
+    WrappedLogAnalyticsWorkspace,
+)
+from data_safe_haven.types import AzureServiceTag
+
+
+class NFSV3StorageAccountProps:
+    def __init__(
+        self,
+        account_name: Input[str],
+        allowed_ip_addresses: Input[Sequence[str]] | None,
+        allowed_service_tag: AzureServiceTag | None,
+        location: Input[str],
+        log_analytics_workspace: Input[WrappedLogAnalyticsWorkspace],
+        resource_group_name: Input[str],
+        subnet_id: Input[str],
+    ):
+        self.account_name = account_name
+        self.allowed_ip_addresses = allowed_ip_addresses
+        self.allowed_service_tag = allowed_service_tag
+        self.location = location
+        self.log_analytics_workspace = log_analytics_workspace
+        self.resource_group_name = resource_group_name
+        self.subnet_id = subnet_id
+
+
+class NFSV3StorageAccountComponent(ComponentResource):
+    encryption_args = storage.EncryptionArgs(
+        key_source=storage.KeySource.MICROSOFT_STORAGE,
+        services=storage.EncryptionServicesArgs(
+            blob=storage.EncryptionServiceArgs(
+                enabled=True, key_type=storage.KeyType.ACCOUNT
+            ),
+            file=storage.EncryptionServiceArgs(
+                enabled=True, key_type=storage.KeyType.ACCOUNT
+            ),
+        ),
+    )
+
+    def __init__(
+        self,
+        name: str,
+        props: NFSV3StorageAccountProps,
+        opts: ResourceOptions | None = None,
+        tags: Input[Mapping[str, Input[str]]] | None = None,
+    ):
+        super().__init__("dsh:sre:NFSV3StorageAccountComponent", name, {}, opts)
+        child_opts = ResourceOptions.merge(opts, ResourceOptions(parent=self))
+        child_tags = {"component": "data"} | (tags if tags else {})
+
+        if props.allowed_service_tag == AzureServiceTag.INTERNET:
+            default_action = storage.DefaultAction.ALLOW
+            ip_rules = []
+        else:
+            default_action = storage.DefaultAction.DENY
+            ip_rules = Output.from_input(props.allowed_ip_addresses).apply(
+                lambda ip_ranges: [
+                    storage.IPRuleArgs(
+                        action=storage.Action.ALLOW,
+                        i_p_address_or_range=str(ip_address),
+                    )
+                    for ip_range in sorted(ip_ranges)
+                    for ip_address in AzureIPv4Range.from_cidr(ip_range).all_ips()
+                ]
+            )
+
+        # Deploy storage account
+        self.storage_account = storage.StorageAccount(
+            f"{self._name}",
+            account_name=props.account_name,
+            allow_blob_public_access=False,
+            enable_https_traffic_only=True,
+            enable_nfs_v3=True,
+            encryption=self.encryption_args,
+            is_hns_enabled=True,
+            kind=storage.Kind.BLOCK_BLOB_STORAGE,
+            location=props.location,
+            minimum_tls_version=storage.MinimumTlsVersion.TLS1_2,
+            network_rule_set=storage.NetworkRuleSetArgs(
+                bypass=storage.Bypass.AZURE_SERVICES,
+                default_action=default_action,
+                ip_rules=ip_rules,
+                virtual_network_rules=[
+                    storage.VirtualNetworkRuleArgs(
+                        virtual_network_resource_id=props.subnet_id,
+                    )
+                ],
+            ),
+            public_network_access=storage.PublicNetworkAccess.ENABLED,
+            resource_group_name=props.resource_group_name,
+            sku=storage.SkuArgs(name=storage.SkuName.PREMIUM_ZRS),
+            opts=child_opts,
+            tags=child_tags,
+        )
+
+        # Add diagnostic setting for blobs
+        insights.DiagnosticSetting(
+            f"{self.storage_account._name}_diagnostic_setting",
+            name=f"{self.storage_account._name}_diagnostic_setting",
+            log_analytics_destination_type="Dedicated",
+            logs=[
+                {
+                    "category_group": "allLogs",
+                    "enabled": True,
+                    "retention_policy": {
+                        "days": 0,
+                        "enabled": False,
+                    },
+                },
+                {
+                    "category_group": "audit",
+                    "enabled": True,
+                    "retention_policy": {
+                        "days": 0,
+                        "enabled": False,
+                    },
+                },
+            ],
+            metrics=[
+                {
+                    "category": "Transaction",
+                    "enabled": True,
+                    "retention_policy": {
+                        "days": 0,
+                        "enabled": False,
+                    },
+                }
+            ],
+            resource_uri=self.storage_account.id.apply(
+                # This is the URI of the blobServices resource which is automatically
+                # created.
+                lambda resource_id: resource_id
+                + "/blobServices/default"
+            ),
+            workspace_id=props.log_analytics_workspace.id,
+        )
+
+        self.register_outputs({})
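
With the wrapped subclass replaced by a `ComponentResource`, the storage account and its diagnostic setting are created together, and callers reach the account through an attribute. A hedged sketch of instantiation (identifiers and values are placeholders; the real arguments come from the SRE Pulumi program):

```python
# Hypothetical instantiation; identifiers and values are placeholders.
data_storage = NFSV3StorageAccountComponent(
    "sre_storage_account_data",
    NFSV3StorageAccountProps(
        account_name="sredataexample",
        allowed_ip_addresses=["192.0.2.0/28"],
        allowed_service_tag=None,
        location="uksouth",
        log_analytics_workspace=monitoring.log_analytics,  # WrappedLogAnalyticsWorkspace
        resource_group_name="rg-sre-example-data",
        subnet_id=subnet_data.id,  # Output[str] from the SRE networking component
    ),
    tags={"component": "data"},
)
# Downstream resources can reference the wrapped account:
storage_account_name = data_storage.storage_account.name
```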
diff --git a/data_safe_haven/infrastructure/components/wrapped/__init__.py b/data_safe_haven/infrastructure/components/wrapped/__init__.py
index ef6e7374d2..fc5f8c8f61 100644
--- a/data_safe_haven/infrastructure/components/wrapped/__init__.py
+++ b/data_safe_haven/infrastructure/components/wrapped/__init__.py
@@ -1,7 +1,5 @@
 from .log_analytics_workspace import WrappedLogAnalyticsWorkspace
-from .nfsv3_storage_account import WrappedNFSV3StorageAccount
 
 __all__ = [
-    "WrappedNFSV3StorageAccount",
     "WrappedLogAnalyticsWorkspace",
 ]
diff --git a/data_safe_haven/infrastructure/components/wrapped/nfsv3_storage_account.py b/data_safe_haven/infrastructure/components/wrapped/nfsv3_storage_account.py
deleted file mode 100644
index e259de4806..0000000000
--- a/data_safe_haven/infrastructure/components/wrapped/nfsv3_storage_account.py
+++ /dev/null
@@ -1,79 +0,0 @@
-from collections.abc import Mapping, Sequence
-
-from pulumi import Input, Output, ResourceOptions
-from pulumi_azure_native import storage
-
-from data_safe_haven.external import AzureIPv4Range
-from data_safe_haven.types import AzureServiceTag
-
-
-class WrappedNFSV3StorageAccount(storage.StorageAccount):
-    encryption_args = storage.EncryptionArgs(
-        key_source=storage.KeySource.MICROSOFT_STORAGE,
-        services=storage.EncryptionServicesArgs(
-            blob=storage.EncryptionServiceArgs(
-                enabled=True, key_type=storage.KeyType.ACCOUNT
-            ),
-            file=storage.EncryptionServiceArgs(
-                enabled=True, key_type=storage.KeyType.ACCOUNT
-            ),
-        ),
-    )
-
-    def __init__(
-        self,
-        resource_name: str,
-        *,
-        account_name: Input[str],
-        allowed_ip_addresses: Input[Sequence[str]] | None,
-        allowed_service_tag: AzureServiceTag | None,
-        location: Input[str],
-        resource_group_name: Input[str],
-        subnet_id: Input[str],
-        opts: ResourceOptions,
-        tags: Input[Mapping[str, Input[str]]],
-    ):
-        if allowed_service_tag == AzureServiceTag.INTERNET:
-            default_action = storage.DefaultAction.ALLOW
-            ip_rules = []
-        else:
-            default_action = storage.DefaultAction.DENY
-            ip_rules = Output.from_input(allowed_ip_addresses).apply(
-                lambda ip_ranges: [
-                    storage.IPRuleArgs(
-                        action=storage.Action.ALLOW,
-                        i_p_address_or_range=str(ip_address),
-                    )
-                    for ip_range in sorted(ip_ranges)
-                    for ip_address in AzureIPv4Range.from_cidr(ip_range).all_ips()
-                ]
-            )
-
-        self.resource_group_name_ = Output.from_input(resource_group_name)
-        super().__init__(
-            resource_name,
-            account_name=account_name,
-            allow_blob_public_access=False,
-            enable_https_traffic_only=True,
-            enable_nfs_v3=True,
-            encryption=self.encryption_args,
-            is_hns_enabled=True,
-            kind=storage.Kind.BLOCK_BLOB_STORAGE,
-            location=location,
-            minimum_tls_version=storage.MinimumTlsVersion.TLS1_2,
-            network_rule_set=storage.NetworkRuleSetArgs(
-                bypass=storage.Bypass.AZURE_SERVICES,
-                default_action=default_action,
-                ip_rules=ip_rules,
-                virtual_network_rules=[
-                    storage.VirtualNetworkRuleArgs(
-                        virtual_network_resource_id=subnet_id,
-                    )
-                ],
-            ),
-            public_network_access=storage.PublicNetworkAccess.ENABLED,
-            resource_group_name=resource_group_name,
-            sku=storage.SkuArgs(name=storage.SkuName.PREMIUM_ZRS),
-            opts=opts,
-            tags=tags,
-        )
diff --git a/data_safe_haven/infrastructure/programs/declarative_sre.py b/data_safe_haven/infrastructure/programs/declarative_sre.py
index 15989bbe7b..2228078c36 100644
--- a/data_safe_haven/infrastructure/programs/declarative_sre.py
+++ b/data_safe_haven/infrastructure/programs/declarative_sre.py
@@ -163,12 +163,27 @@ def __call__(self) -> None:
             ),
         )
 
+        # Deploy monitoring
+        monitoring = SREMonitoringComponent(
+            "sre_monitoring",
+            self.stack_name,
+            SREMonitoringProps(
+                dns_private_zones=dns.private_zones,
+                location=self.config.azure.location,
+                resource_group_name=resource_group.name,
+                subnet=networking.subnet_monitoring,
+                timezone=self.config.sre.timezone,
+            ),
+            tags=self.tags,
+        )
+
         # Deploy SRE firewall
         SREFirewallComponent(
             "sre_firewall",
             self.stack_name,
             SREFirewallProps(
                 location=self.config.azure.location,
+                log_analytics_workspace=monitoring.log_analytics,
                 resource_group_name=resource_group.name,
                 route_table_name=networking.route_table_name,
                 subnet_apt_proxy_server=networking.subnet_apt_proxy_server,
@@ -196,6 +211,7 @@ def __call__(self) -> None:
                 dns_record=networking.shm_ns_record,
                 dns_server_admin_password=dns.password_admin,
                 location=self.config.azure.location,
+                log_analytics_workspace=monitoring.log_analytics,
                 resource_group=resource_group,
                 sre_fqdn=networking.sre_fqdn,
                 storage_quota_gb_home=self.config.sre.storage_quota_gb.home,
@@ -217,6 +233,7 @@ def __call__(self) -> None:
                 containers_subnet=networking.subnet_apt_proxy_server,
                 dns_server_ip=dns.ip_address,
                 location=self.config.azure.location,
+                log_analytics_workspace=monitoring.log_analytics,
                 resource_group_name=resource_group.name,
                 sre_fqdn=networking.sre_fqdn,
                 storage_account_key=data.storage_account_data_configuration_key,
@@ -233,6 +250,7 @@ def __call__(self) -> None:
                 dns_server_ip=dns.ip_address,
                 dockerhub_credentials=dockerhub_credentials,
                 location=self.config.azure.location,
+                log_analytics_workspace=monitoring.log_analytics,
                 resource_group_name=resource_group.name,
                 sre_fqdn=networking.sre_fqdn,
                 storage_account_key=data.storage_account_data_configuration_key,
@@ -253,6 +271,7 @@ def __call__(self) -> None:
                 entra_application_secret=entra.identity_application_secret,
                 entra_tenant_id=shm_entra_tenant_id,
                 location=self.config.azure.location,
+                log_analytics_workspace=monitoring.log_analytics,
                 resource_group_name=resource_group.name,
                 shm_fqdn=shm_fqdn,
                 sre_fqdn=networking.sre_fqdn,
@@ -299,6 +318,7 @@ def __call__(self) -> None:
                 ldap_user_filter=ldap_user_filter,
                 ldap_user_search_base=ldap_user_search_base,
                 location=self.config.azure.location,
+                log_analytics_workspace=monitoring.log_analytics,
                 resource_group_name=resource_group.name,
                 storage_account_key=data.storage_account_data_configuration_key,
                 storage_account_name=data.storage_account_data_configuration_name,
@@ -325,6 +345,7 @@ def __call__(self) -> None:
                 ldap_username_attribute=ldap_username_attribute,
                 ldap_user_search_base=ldap_user_search_base,
                 location=self.config.azure.location,
+                log_analytics_workspace=monitoring.log_analytics,
                 nexus_admin_password=data.password_nexus_admin,
                 resource_group_name=resource_group.name,
                 software_packages=self.config.sre.software_packages,
@@ -339,20 +360,6 @@ def __call__(self) -> None:
             tags=self.tags,
         )
 
-        # Deploy monitoring
-        monitoring = SREMonitoringComponent(
-            "sre_monitoring",
-            self.stack_name,
-            SREMonitoringProps(
-                dns_private_zones=dns.private_zones,
-                location=self.config.azure.location,
-                resource_group_name=resource_group.name,
-                subnet=networking.subnet_monitoring,
-                timezone=self.config.sre.timezone,
-            ),
-            tags=self.tags,
-        )
-
         # Deploy desired state
         desired_state = SREDesiredStateComponent(
             "sre_desired_state",
@@ -371,6 +378,7 @@ def __call__(self) -> None:
                 ldap_user_filter=ldap_user_filter,
                 ldap_user_search_base=ldap_user_search_base,
                 location=self.config.azure.location,
+                log_analytics_workspace=monitoring.log_analytics,
                 resource_group=resource_group,
                 software_repository_hostname=user_services.software_repositories.hostname,
                 subnet_desired_state=networking.subnet_desired_state,
@@ -420,4 +428,5 @@ def __call__(self) -> None:
         pulumi.export("data", data.exports)
         pulumi.export("ldap", ldap_group_names)
         pulumi.export("remote_desktop", remote_desktop.exports)
+        pulumi.export("sre_fqdn", networking.sre_fqdn)
         pulumi.export("workspaces", workspaces.exports)
diff --git a/data_safe_haven/infrastructure/programs/sre/apt_proxy_server.py b/data_safe_haven/infrastructure/programs/sre/apt_proxy_server.py
index ff1cb4b0da..d58a17a6de 100644
--- a/data_safe_haven/infrastructure/programs/sre/apt_proxy_server.py
+++ b/data_safe_haven/infrastructure/programs/sre/apt_proxy_server.py
@@ -12,6 +12,7 @@
     FileShareFileProps,
     LocalDnsRecordComponent,
     LocalDnsRecordProps,
+    WrappedLogAnalyticsWorkspace,
 )
 from data_safe_haven.types import PermittedDomains
 
@@ -24,6 +25,7 @@ def __init__(
         containers_subnet: Input[str],
         dns_server_ip: Input[str],
         location: Input[str],
+        log_analytics_workspace: Input[WrappedLogAnalyticsWorkspace],
         resource_group_name: Input[str],
         sre_fqdn: Input[str],
         storage_account_key: Input[str],
@@ -34,6 +36,7 @@ def __init__(
         )
         self.dns_server_ip = dns_server_ip
         self.location = location
+        self.log_analytics_workspace = log_analytics_workspace
         self.resource_group_name = resource_group_name
         self.sre_fqdn = sre_fqdn
         self.storage_account_key = storage_account_key
@@ -119,6 +122,12 @@ def __init__(
                     ],
                 ),
             ],
+            diagnostics=containerinstance.ContainerGroupDiagnosticsArgs(
+                log_analytics=containerinstance.LogAnalyticsArgs(
+                    workspace_id=props.log_analytics_workspace.workspace_id,
+                    workspace_key=props.log_analytics_workspace.workspace_key,
+                ),
+            ),
             dns_config=containerinstance.DnsConfigurationArgs(
                 name_servers=[props.dns_server_ip],
             ),
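The `diagnostics` block added here is repeated verbatim in the ClamAV mirror, Gitea, HedgeDoc, identity, remote desktop and software repository components below. A hypothetical helper (the function name is illustrative, not in the codebase) that would build it in one place:

```python
# Sketch of a shared factory for the recurring container-group diagnostics args.
from pulumi import Input
from pulumi_azure_native import containerinstance


def container_group_diagnostics(
    workspace_id: Input[str],
    workspace_key: Input[str],
) -> containerinstance.ContainerGroupDiagnosticsArgs:
    """Send container group logs to the given Log Analytics workspace."""
    return containerinstance.ContainerGroupDiagnosticsArgs(
        log_analytics=containerinstance.LogAnalyticsArgs(
            workspace_id=workspace_id,
            workspace_key=workspace_key,
        ),
    )
```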
diff --git a/data_safe_haven/infrastructure/programs/sre/clamav_mirror.py b/data_safe_haven/infrastructure/programs/sre/clamav_mirror.py
index 203334a21b..e6f81df6cb 100644
--- a/data_safe_haven/infrastructure/programs/sre/clamav_mirror.py
+++ b/data_safe_haven/infrastructure/programs/sre/clamav_mirror.py
@@ -11,6 +11,7 @@
 from data_safe_haven.infrastructure.components import (
     LocalDnsRecordComponent,
     LocalDnsRecordProps,
+    WrappedLogAnalyticsWorkspace,
 )
 
 
@@ -22,6 +23,7 @@ def __init__(
         dns_server_ip: Input[str],
         dockerhub_credentials: DockerHubCredentials,
         location: Input[str],
+        log_analytics_workspace: Input[WrappedLogAnalyticsWorkspace],
         resource_group_name: Input[str],
         sre_fqdn: Input[str],
         storage_account_key: Input[str],
@@ -31,6 +33,7 @@ def __init__(
         self.dns_server_ip = dns_server_ip
         self.dockerhub_credentials = dockerhub_credentials
         self.location = location
+        self.log_analytics_workspace = log_analytics_workspace
         self.resource_group_name = resource_group_name
         self.sre_fqdn = sre_fqdn
         self.storage_account_key = storage_account_key
@@ -95,6 +98,12 @@ def __init__(
                     ],
                 ),
             ],
+            diagnostics=containerinstance.ContainerGroupDiagnosticsArgs(
+                log_analytics=containerinstance.LogAnalyticsArgs(
+                    workspace_id=props.log_analytics_workspace.workspace_id,
+                    workspace_key=props.log_analytics_workspace.workspace_key,
+                ),
+            ),
             dns_config=containerinstance.DnsConfigurationArgs(
                 name_servers=[props.dns_server_ip],
             ),
diff --git a/data_safe_haven/infrastructure/programs/sre/data.py b/data_safe_haven/infrastructure/programs/sre/data.py
index 711b76139f..825861c122 100644
--- a/data_safe_haven/infrastructure/programs/sre/data.py
+++ b/data_safe_haven/infrastructure/programs/sre/data.py
@@ -7,6 +7,7 @@
 from pulumi import ComponentResource, Input, Output, ResourceOptions
 from pulumi_azure_native import (
     authorization,
+    insights,
     keyvault,
     managedidentity,
     network,
@@ -31,9 +32,11 @@
 from data_safe_haven.infrastructure.components import (
     NFSV3BlobContainerComponent,
     NFSV3BlobContainerProps,
+    NFSV3StorageAccountComponent,
+    NFSV3StorageAccountProps,
     SSLCertificate,
     SSLCertificateProps,
-    WrappedNFSV3StorageAccount,
+    WrappedLogAnalyticsWorkspace,
 )
 from data_safe_haven.types import AzureDnsZoneNames, AzureServiceTag
 
@@ -51,6 +54,7 @@ def __init__(
         dns_record: Input[network.RecordSet],
         dns_server_admin_password: Input[pulumi_random.RandomPassword],
         location: Input[str],
+        log_analytics_workspace: Input[WrappedLogAnalyticsWorkspace],
         resource_group: Input[resources.ResourceGroup],
         sre_fqdn: Input[str],
         storage_quota_gb_home: Input[int],
@@ -69,6 +73,7 @@ def __init__(
         self.dns_record = dns_record
         self.password_dns_server_admin = dns_server_admin_password
         self.location = location
+        self.log_analytics_workspace = log_analytics_workspace
         self.resource_group_id = Output.from_input(resource_group).apply(get_id_from_rg)
         self.resource_group_name = Output.from_input(resource_group).apply(
             get_name_from_rg
@@ -421,6 +426,45 @@ def __init__(
                 resource_group_name=kwargs["resource_group_name"],
             )
         )
+        # Add diagnostic setting for files
+        insights.DiagnosticSetting(
+            f"{storage_account_data_configuration._name}_diagnostic_setting",
+            name=f"{storage_account_data_configuration._name}_diagnostic_setting",
+            log_analytics_destination_type="Dedicated",
+            logs=[
+                {
+                    "category_group": "allLogs",
+                    "enabled": True,
+                    "retention_policy": {
+                        "days": 0,
+                        "enabled": False,
+                    },
+                },
+                {
+                    "category_group": "audit",
+                    "enabled": True,
+                    "retention_policy": {
+                        "days": 0,
+                        "enabled": False,
+                    },
+                },
+            ],
+            metrics=[
+                {
+                    "category": "Transaction",
+                    "enabled": True,
+                    "retention_policy": {
+                        "days": 0,
+                        "enabled": False,
+                    },
+                }
+            ],
+            # This is the URI of the automatically created fileService resource
+            resource_uri=Output.concat(
+                storage_account_data_configuration.id, "/fileServices/default"
+            ),
+            workspace_id=props.log_analytics_workspace.id,
+        )
         # Set up a private endpoint for the configuration data storage account
         storage_account_data_configuration_private_endpoint = network.PrivateEndpoint(
             f"{storage_account_data_configuration._name}_private_endpoint",
@@ -467,20 +511,26 @@ def __init__(
         # Deploy sensitive data blob storage account
         # - This holds the /mnt/input and /mnt/output containers that are mounted by workspaces
         # - Azure blobs have worse NFS support but can be accessed with Azure Storage Explorer
-        storage_account_data_private_sensitive = WrappedNFSV3StorageAccount(
+        component_data_private_sensitive = NFSV3StorageAccountComponent(
             f"{self._name}_storage_account_data_private_sensitive",
-            # Storage account names have a maximum of 24 characters
-            account_name=alphanumeric(
-                f"{''.join(truncate_tokens(stack_name.split('-'), 11))}sensitivedata{sha256hash(self._name)}"
-            )[:24],
-            allowed_ip_addresses=data_private_sensitive_ip_addresses,
-            allowed_service_tag=data_private_sensitive_service_tag,
-            location=props.location,
-            subnet_id=props.subnet_data_private_id,
-            resource_group_name=props.resource_group_name,
+            NFSV3StorageAccountProps(
+                # Storage account names have a maximum of 24 characters
+                account_name=alphanumeric(
+                    f"{''.join(truncate_tokens(stack_name.split('-'), 11))}sensitivedata{sha256hash(self._name)}"
+                )[:24],
+                allowed_ip_addresses=data_private_sensitive_ip_addresses,
+                allowed_service_tag=data_private_sensitive_service_tag,
+                location=props.location,
+                log_analytics_workspace=props.log_analytics_workspace,
+                subnet_id=props.subnet_data_private_id,
+                resource_group_name=props.resource_group_name,
+            ),
             opts=child_opts,
             tags=child_tags,
         )
+        storage_account_data_private_sensitive = (
+            component_data_private_sensitive.storage_account
+        )
         # Deploy storage containers
         NFSV3BlobContainerComponent(
             f"{self._name}_blob_egress",
@@ -615,6 +665,45 @@ def __init__(
             opts=child_opts,
             tags=child_tags,
         )
+        # Add diagnostic setting for files
+        insights.DiagnosticSetting(
+            f"{storage_account_data_private_user._name}_diagnostic_setting",
+            name=f"{storage_account_data_private_user._name}_diagnostic_setting",
+            log_analytics_destination_type="Dedicated",
+            logs=[
+                {
+                    "category_group": "allLogs",
+                    "enabled": True,
+                    "retention_policy": {
+                        "days": 0,
+                        "enabled": False,
+                    },
+                },
+                {
+                    "category_group": "audit",
+                    "enabled": True,
+                    "retention_policy": {
+                        "days": 0,
+                        "enabled": False,
+                    },
+                },
+            ],
+            metrics=[
+                {
+                    "category": "Transaction",
+                    "enabled": True,
+                    "retention_policy": {
+                        "days": 0,
+                        "enabled": False,
+                    },
+                }
+            ],
+            # This is the URI of the automatically created fileService resource
+            resource_uri=Output.concat(
+                storage_account_data_private_user.id, "/fileServices/default"
+            ),
+            workspace_id=props.log_analytics_workspace.id,
+        )
         storage.FileShare(
             f"{storage_account_data_private_user._name}_files_home",
             access_tier=storage.ShareAccessTier.PREMIUM,
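Both storage accounts in this file gain an identical `DiagnosticSetting` targeting the automatically created `fileServices/default` sub-resource. A hypothetical helper (names illustrative, not part of the codebase) that would avoid the duplication:

```python
# Sketch of a shared factory for the repeated file-service diagnostic setting.
from pulumi import Input, Output
from pulumi_azure_native import insights


def file_service_diagnostics(
    name: str,
    storage_account_id: Input[str],
    workspace_id: Input[str],
) -> insights.DiagnosticSetting:
    """Send all logs, audit logs and transaction metrics for a storage
    account's fileServices/default resource to a Log Analytics workspace."""
    retention = {"days": 0, "enabled": False}
    return insights.DiagnosticSetting(
        f"{name}_diagnostic_setting",
        name=f"{name}_diagnostic_setting",
        log_analytics_destination_type="Dedicated",
        logs=[
            {"category_group": group, "enabled": True, "retention_policy": retention}
            for group in ("allLogs", "audit")
        ],
        metrics=[
            {"category": "Transaction", "enabled": True, "retention_policy": retention}
        ],
        resource_uri=Output.concat(storage_account_id, "/fileServices/default"),
        workspace_id=workspace_id,
    )
```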
diff --git a/data_safe_haven/infrastructure/programs/sre/desired_state.py b/data_safe_haven/infrastructure/programs/sre/desired_state.py
index c4392f5210..20f4e357f1 100644
--- a/data_safe_haven/infrastructure/programs/sre/desired_state.py
+++ b/data_safe_haven/infrastructure/programs/sre/desired_state.py
@@ -31,7 +31,9 @@
 from data_safe_haven.infrastructure.components import (
     NFSV3BlobContainerComponent,
     NFSV3BlobContainerProps,
-    WrappedNFSV3StorageAccount,
+    NFSV3StorageAccountComponent,
+    NFSV3StorageAccountProps,
+    WrappedLogAnalyticsWorkspace,
 )
 from data_safe_haven.resources import resources_path
 from data_safe_haven.types import AzureDnsZoneNames
@@ -55,6 +57,7 @@ def __init__(
         ldap_user_filter: Input[str],
         ldap_user_search_base: Input[str],
         location: Input[str],
+        log_analytics_workspace: Input[WrappedLogAnalyticsWorkspace],
         resource_group: Input[resources.ResourceGroup],
         software_repository_hostname: Input[str],
         subscription_name: Input[str],
@@ -73,6 +76,7 @@ def __init__(
         self.ldap_user_filter = ldap_user_filter
         self.ldap_user_search_base = ldap_user_search_base
         self.location = location
+        self.log_analytics_workspace = log_analytics_workspace
         self.resource_group_id = Output.from_input(resource_group).apply(get_id_from_rg)
         self.resource_group_name = Output.from_input(resource_group).apply(
             get_name_from_rg
@@ -102,19 +106,23 @@ def __init__(
         # Deploy desired state storage account
         # - This holds the /var/local/ansible container that is mounted by workspaces
         # - Azure blobs have worse NFS support but can be accessed with Azure Storage Explorer
-        storage_account = WrappedNFSV3StorageAccount(
+        storage_component = NFSV3StorageAccountComponent(
             f"{self._name}_storage_account",
-            account_name=alphanumeric(
-                f"{''.join(truncate_tokens(stack_name.split('-'), 11))}desiredstate{sha256hash(self._name)}"
-            )[:24],
-            allowed_ip_addresses=props.admin_ip_addresses,
-            allowed_service_tag=None,
-            location=props.location,
-            resource_group_name=props.resource_group_name,
-            subnet_id=props.subnet_desired_state_id,
+            NFSV3StorageAccountProps(
+                account_name=alphanumeric(
+                    f"{''.join(truncate_tokens(stack_name.split('-'), 11))}desiredstate{sha256hash(self._name)}"
+                )[:24],
+                allowed_ip_addresses=props.admin_ip_addresses,
+                allowed_service_tag=None,
+                location=props.location,
+                log_analytics_workspace=props.log_analytics_workspace,
+                resource_group_name=props.resource_group_name,
+                subnet_id=props.subnet_desired_state_id,
+            ),
             opts=child_opts,
             tags=child_tags,
         )
+        storage_account = storage_component.storage_account
         # Deploy desired state share
         container_desired_state = NFSV3BlobContainerComponent(
             f"{self._name}_blob_desired_state",
diff --git a/data_safe_haven/infrastructure/programs/sre/firewall.py b/data_safe_haven/infrastructure/programs/sre/firewall.py
index 97f7a885b7..ed831e826a 100644
--- a/data_safe_haven/infrastructure/programs/sre/firewall.py
+++ b/data_safe_haven/infrastructure/programs/sre/firewall.py
@@ -3,12 +3,13 @@
 from collections.abc import Mapping
 
 from pulumi import ComponentResource, Input, Output, ResourceOptions
-from pulumi_azure_native import network
+from pulumi_azure_native import insights, network
 
 from data_safe_haven.infrastructure.common import (
     get_address_prefixes_from_subnet,
     get_id_from_subnet,
 )
+from data_safe_haven.infrastructure.components import WrappedLogAnalyticsWorkspace
 from data_safe_haven.types import (
     FirewallPriorities,
     ForbiddenDomains,
@@ -23,6 +24,7 @@ class SREFirewallProps:
     def __init__(
         self,
         location: Input[str],
+        log_analytics_workspace: Input[WrappedLogAnalyticsWorkspace],
         resource_group_name: Input[str],
         route_table_name: Input[str],
         subnet_apt_proxy_server: Input[network.GetSubnetResult],
@@ -35,6 +37,7 @@ def __init__(
         subnet_workspaces: Input[network.GetSubnetResult],
     ) -> None:
         self.location = location
+        self.log_analytics_workspace = log_analytics_workspace
         self.resource_group_name = resource_group_name
         self.route_table_name = route_table_name
         self.subnet_apt_proxy_server_prefixes = Output.from_input(
@@ -331,6 +334,36 @@ def __init__(
             tags=child_tags,
         )
 
+        # Add diagnostic settings for firewall
+        # This links the firewall to the log analytics workspace
+        insights.DiagnosticSetting(
+            f"{self._name}_firewall_diagnostic_settings",
+            name="firewall_diagnostic_settings",
+            log_analytics_destination_type="Dedicated",
+            logs=[
+                {
+                    "category_group": "allLogs",
+                    "enabled": True,
+                    "retention_policy": {
+                        "days": 0,
+                        "enabled": False,
+                    },
+                },
+            ],
+            metrics=[
+                {
+                    "category": "AllMetrics",
+                    "enabled": True,
+                    "retention_policy": {
+                        "days": 0,
+                        "enabled": False,
+                    },
+                }
+            ],
+            resource_uri=firewall.id,
+            workspace_id=props.log_analytics_workspace.id,
+        )
+
         # Retrieve the private IP address for the firewall
         private_ip_address = firewall.ip_configurations.apply(
             lambda cfgs: "" if not cfgs else cfgs[0].private_ip_address
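Because the firewall's diagnostic setting uses `log_analytics_destination_type="Dedicated"`, logs land in resource-specific tables rather than the shared `AzureDiagnostics` table. A hedged sketch of querying them with the `azure-monitor-query` package (the workspace ID is a placeholder, and the `AZFWApplicationRule` table name assumes application-rule logging is flowing):

```python
# Query recent firewall application-rule hits from the Log Analytics workspace.
from datetime import timedelta

from azure.identity import DefaultAzureCredential
from azure.monitor.query import LogsQueryClient

client = LogsQueryClient(DefaultAzureCredential())
response = client.query_workspace(
    workspace_id="00000000-0000-0000-0000-000000000000",  # placeholder GUID
    query="AZFWApplicationRule | take 10",
    timespan=timedelta(hours=24),
)
for table in response.tables:
    for row in table.rows:
        print(row)
```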
diff --git a/data_safe_haven/infrastructure/programs/sre/gitea_server.py b/data_safe_haven/infrastructure/programs/sre/gitea_server.py
index ab85ee51d8..2690de9c79 100644
--- a/data_safe_haven/infrastructure/programs/sre/gitea_server.py
+++ b/data_safe_haven/infrastructure/programs/sre/gitea_server.py
@@ -14,6 +14,7 @@
     LocalDnsRecordProps,
     PostgresqlDatabaseComponent,
     PostgresqlDatabaseProps,
+    WrappedLogAnalyticsWorkspace,
 )
 from data_safe_haven.resources import resources_path
 from data_safe_haven.utility import FileReader
@@ -35,6 +36,7 @@ def __init__(
         ldap_user_filter: Input[str],
         ldap_user_search_base: Input[str],
         location: Input[str],
+        log_analytics_workspace: Input[WrappedLogAnalyticsWorkspace],
         resource_group_name: Input[str],
         sre_fqdn: Input[str],
         storage_account_key: Input[str],
@@ -55,6 +57,7 @@ def __init__(
         self.ldap_user_filter = ldap_user_filter
         self.ldap_user_search_base = ldap_user_search_base
         self.location = location
+        self.log_analytics_workspace = log_analytics_workspace
         self.resource_group_name = resource_group_name
         self.sre_fqdn = sre_fqdn
         self.storage_account_key = storage_account_key
@@ -272,6 +275,12 @@ def __init__(
                     ],
                 ),
             ],
+            diagnostics=containerinstance.ContainerGroupDiagnosticsArgs(
+                log_analytics=containerinstance.LogAnalyticsArgs(
+                    workspace_id=props.log_analytics_workspace.workspace_id,
+                    workspace_key=props.log_analytics_workspace.workspace_key,
+                ),
+            ),
             dns_config=containerinstance.DnsConfigurationArgs(
                 name_servers=[props.dns_server_ip],
             ),
diff --git a/data_safe_haven/infrastructure/programs/sre/hedgedoc_server.py b/data_safe_haven/infrastructure/programs/sre/hedgedoc_server.py
index 24cb858e68..d35efa81c5 100644
--- a/data_safe_haven/infrastructure/programs/sre/hedgedoc_server.py
+++ b/data_safe_haven/infrastructure/programs/sre/hedgedoc_server.py
@@ -15,6 +15,7 @@
     LocalDnsRecordProps,
     PostgresqlDatabaseComponent,
     PostgresqlDatabaseProps,
+    WrappedLogAnalyticsWorkspace,
 )
 from data_safe_haven.resources import resources_path
 from data_safe_haven.types import Ports
@@ -37,6 +38,7 @@ def __init__(
         ldap_user_search_base: Input[str],
         ldap_username_attribute: Input[str],
         location: Input[str],
+        log_analytics_workspace: Input[WrappedLogAnalyticsWorkspace],
         resource_group_name: Input[str],
         sre_fqdn: Input[str],
         storage_account_key: Input[str],
@@ -58,6 +60,7 @@ def __init__(
         self.ldap_user_search_base = ldap_user_search_base
         self.ldap_username_attribute = ldap_username_attribute
         self.location = location
+        self.log_analytics_workspace = log_analytics_workspace
         self.resource_group_name = resource_group_name
         self.sre_fqdn = sre_fqdn
         self.storage_account_key = storage_account_key
@@ -253,6 +256,12 @@ def __init__(
                     ],
                 ),
             ],
+            diagnostics=containerinstance.ContainerGroupDiagnosticsArgs(
+                log_analytics=containerinstance.LogAnalyticsArgs(
+                    workspace_id=props.log_analytics_workspace.workspace_id,
+                    workspace_key=props.log_analytics_workspace.workspace_key,
+                ),
+            ),
             dns_config=containerinstance.DnsConfigurationArgs(
                 name_servers=[props.dns_server_ip],
             ),
diff --git a/data_safe_haven/infrastructure/programs/sre/identity.py b/data_safe_haven/infrastructure/programs/sre/identity.py
index 7839853384..4b06420190 100644
--- a/data_safe_haven/infrastructure/programs/sre/identity.py
+++ b/data_safe_haven/infrastructure/programs/sre/identity.py
@@ -13,6 +13,7 @@
 from data_safe_haven.infrastructure.components import (
     LocalDnsRecordComponent,
     LocalDnsRecordProps,
+    WrappedLogAnalyticsWorkspace,
 )
 
 
@@ -27,6 +28,7 @@ def __init__(
         entra_application_secret: Input[str],
         entra_tenant_id: Input[str],
         location: Input[str],
+        log_analytics_workspace: Input[WrappedLogAnalyticsWorkspace],
         resource_group_name: Input[str],
         shm_fqdn: Input[str],
         sre_fqdn: Input[str],
@@ -40,6 +42,7 @@ def __init__(
         self.entra_application_secret = entra_application_secret
         self.entra_tenant_id = entra_tenant_id
         self.location = location
+        self.log_analytics_workspace = log_analytics_workspace
         self.resource_group_name = resource_group_name
         self.shm_fqdn = shm_fqdn
         self.sre_fqdn = sre_fqdn
@@ -163,6 +166,12 @@ def __init__(
                     ],
                 ),
             ],
+            diagnostics=containerinstance.ContainerGroupDiagnosticsArgs(
+                log_analytics=containerinstance.LogAnalyticsArgs(
+                    workspace_id=props.log_analytics_workspace.workspace_id,
+                    workspace_key=props.log_analytics_workspace.workspace_key,
+                ),
+            ),
             dns_config=containerinstance.DnsConfigurationArgs(
                 name_servers=[props.dns_server_ip],
             ),
diff --git a/data_safe_haven/infrastructure/programs/sre/remote_desktop.py b/data_safe_haven/infrastructure/programs/sre/remote_desktop.py
index e2df83ede5..ba1e8b9816 100644
--- a/data_safe_haven/infrastructure/programs/sre/remote_desktop.py
+++ b/data_safe_haven/infrastructure/programs/sre/remote_desktop.py
@@ -15,6 +15,7 @@
     FileShareFileProps,
     PostgresqlDatabaseComponent,
     PostgresqlDatabaseProps,
+    WrappedLogAnalyticsWorkspace,
 )
 from data_safe_haven.resources import resources_path
 from data_safe_haven.utility import FileReader
@@ -40,6 +41,7 @@ def __init__(
         ldap_user_filter: Input[str],
         ldap_user_search_base: Input[str],
         location: Input[str],
+        log_analytics_workspace: Input[WrappedLogAnalyticsWorkspace],
         resource_group_name: Input[str],
         storage_account_key: Input[str],
         storage_account_name: Input[str],
@@ -65,6 +67,7 @@ def __init__(
         self.ldap_user_filter = ldap_user_filter
         self.ldap_user_search_base = ldap_user_search_base
         self.location = location
+        self.log_analytics_workspace = log_analytics_workspace
         self.resource_group_name = resource_group_name
         self.storage_account_key = storage_account_key
         self.storage_account_name = storage_account_name
@@ -348,6 +351,12 @@ def __init__(
                     ),
                 ),
             ],
+            diagnostics=containerinstance.ContainerGroupDiagnosticsArgs(
+                log_analytics=containerinstance.LogAnalyticsArgs(
+                    workspace_id=props.log_analytics_workspace.workspace_id,
+                    workspace_key=props.log_analytics_workspace.workspace_key,
+                ),
+            ),
             dns_config=containerinstance.DnsConfigurationArgs(
                 name_servers=[props.dns_server_ip],
             ),
diff --git a/data_safe_haven/infrastructure/programs/sre/software_repositories.py b/data_safe_haven/infrastructure/programs/sre/software_repositories.py
index 013c9ffcdd..be67c3e8af 100644
--- a/data_safe_haven/infrastructure/programs/sre/software_repositories.py
+++ b/data_safe_haven/infrastructure/programs/sre/software_repositories.py
@@ -1,4 +1,4 @@
-"""Pulumi component for SRE monitoring"""
+"""Pulumi component for SRE software repositories"""
 
 from collections.abc import Mapping
 
@@ -14,6 +14,7 @@
     FileShareFileProps,
     LocalDnsRecordComponent,
     LocalDnsRecordProps,
+    WrappedLogAnalyticsWorkspace,
 )
 from data_safe_haven.resources import resources_path
 from data_safe_haven.types import Ports, SoftwarePackageCategory
@@ -28,6 +29,7 @@ def __init__(
         dns_server_ip: Input[str],
         dockerhub_credentials: DockerHubCredentials,
         location: Input[str],
+        log_analytics_workspace: Input[WrappedLogAnalyticsWorkspace],
         nexus_admin_password: Input[str],
         resource_group_name: Input[str],
         software_packages: SoftwarePackageCategory,
@@ -39,6 +41,7 @@ def __init__(
         self.dns_server_ip = dns_server_ip
         self.dockerhub_credentials = dockerhub_credentials
         self.location = location
+        self.log_analytics_workspace = log_analytics_workspace
         self.nexus_admin_password = Output.secret(nexus_admin_password)
         self.nexus_packages: str | None = {
             SoftwarePackageCategory.ANY: "all",
@@ -250,6 +253,12 @@ def __init__(
                         ],
                     ),
                 ],
+                diagnostics=containerinstance.ContainerGroupDiagnosticsArgs(
+                    log_analytics=containerinstance.LogAnalyticsArgs(
+                        workspace_id=props.log_analytics_workspace.workspace_id,
+                        workspace_key=props.log_analytics_workspace.workspace_key,
+                    ),
+                ),
                 dns_config=containerinstance.DnsConfigurationArgs(
                     name_servers=[props.dns_server_ip],
                 ),
diff --git a/data_safe_haven/infrastructure/programs/sre/user_services.py b/data_safe_haven/infrastructure/programs/sre/user_services.py
index 5eb04bdfbb..1418b3d11f 100644
--- a/data_safe_haven/infrastructure/programs/sre/user_services.py
+++ b/data_safe_haven/infrastructure/programs/sre/user_services.py
@@ -7,6 +7,7 @@
     DockerHubCredentials,
     get_id_from_subnet,
 )
+from data_safe_haven.infrastructure.components import WrappedLogAnalyticsWorkspace
 from data_safe_haven.types import DatabaseSystem, SoftwarePackageCategory
 
 from .database_servers import SREDatabaseServerComponent, SREDatabaseServerProps
@@ -35,6 +36,7 @@ def __init__(
         ldap_user_filter: Input[str],
         ldap_user_search_base: Input[str],
         location: Input[str],
+        log_analytics_workspace: Input[WrappedLogAnalyticsWorkspace],
         nexus_admin_password: Input[str],
         resource_group_name: Input[str],
         software_packages: SoftwarePackageCategory,
@@ -58,6 +60,7 @@ def __init__(
         self.ldap_user_filter = ldap_user_filter
         self.ldap_user_search_base = ldap_user_search_base
         self.location = location
+        self.log_analytics_workspace = log_analytics_workspace
         self.nexus_admin_password = Output.secret(nexus_admin_password)
         self.resource_group_name = resource_group_name
         self.software_packages = software_packages
@@ -109,6 +112,7 @@ def __init__(
                 ldap_user_filter=props.ldap_user_filter,
                 ldap_user_search_base=props.ldap_user_search_base,
                 location=props.location,
+                log_analytics_workspace=props.log_analytics_workspace,
                 resource_group_name=props.resource_group_name,
                 sre_fqdn=props.sre_fqdn,
                 storage_account_key=props.storage_account_key,
@@ -134,6 +138,7 @@ def __init__(
                 ldap_user_filter=props.ldap_user_filter,
                 ldap_user_search_base=props.ldap_user_search_base,
                 location=props.location,
+                log_analytics_workspace=props.log_analytics_workspace,
                 resource_group_name=props.resource_group_name,
                 sre_fqdn=props.sre_fqdn,
                 storage_account_key=props.storage_account_key,
@@ -151,6 +156,7 @@ def __init__(
                 dns_server_ip=props.dns_server_ip,
                 dockerhub_credentials=props.dockerhub_credentials,
                 location=props.location,
+                log_analytics_workspace=props.log_analytics_workspace,
                 nexus_admin_password=props.nexus_admin_password,
                 resource_group_name=props.resource_group_name,
                 sre_fqdn=props.sre_fqdn,
diff --git a/data_safe_haven/types/__init__.py b/data_safe_haven/types/__init__.py
index 728df06c19..bfe1f6898a 100644
--- a/data_safe_haven/types/__init__.py
+++ b/data_safe_haven/types/__init__.py
@@ -34,14 +34,14 @@
     "AzureDnsZoneNames",
     "AzureLocation",
     "AzurePremiumFileShareSize",
-    "AzureServiceTag",
     "AzureSdkCredentialScope",
+    "AzureServiceTag",
     "AzureSubscriptionName",
     "AzureVmSku",
     "DatabaseSystem",
     "EmailAddress",
-    "EntraApplicationId",
     "EntraAppPermissionType",
+    "EntraApplicationId",
     "EntraGroupName",
     "EntraSignInAudienceType",
     "FirewallPriorities",
diff --git a/docs/source/conf.py b/docs/source/conf.py
index f262d36dc2..dcc77557e7 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -64,7 +64,10 @@
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
 # This pattern also affects html_static_path and html_extra_path.
-exclude_patterns = ["**/*.partial.md"]
+exclude_patterns = [
+    "**/*.partial.md",
+    "deployment/security_checklist/security_checklist_template.md",
+]
 # -- Options for HTML output -------------------------------------------------
 
 # The theme to use for HTML and HTML Help pages.  See the documentation for
diff --git a/docs/source/deployment/security_checklist.md b/docs/source/deployment/security_checklist.md
index 2737b1cb5c..b96cdc38da 100644
--- a/docs/source/deployment/security_checklist.md
+++ b/docs/source/deployment/security_checklist.md
@@ -8,6 +8,7 @@ Organisations are responsible for making their own decisions about the suitabili
 ```
 
 In this check list we aim to evaluate our deployment against the {ref}`security configuration <design_turing_security_configuration>` that we apply at the Alan Turing Institute.
+A copy of this template in Markdown format is {download}`available for download <security_checklist/security_checklist_template.md>`.
 The security checklist currently focuses on checks that can evaluate these security requirements for {ref}`policy_tier_2` (or greater) SREs (with some steps noted as specific to a tier):
 
 ## How to use this checklist
@@ -20,6 +21,8 @@ Work your way through the actions described in each section, taking care to noti
 - {{white_check_mark}} This indicates a checklist item for which a screenshot is either not appropriate or difficult
 ```
 
+You can use {download}`this template Markdown file <./security_checklist/security_checklist_template.md>` to complete the checklist.
+
 ## Prerequisites
 
 ### Roles
@@ -50,7 +53,7 @@ In each SRE configuration
 
 ### Accounts
 
-[Create a user account](../management/index.md#add-users-to-the-data-safe-haven) for the research user in your SHM.
+[Create a user account](../management/user.md#add-users-to-the-data-safe-haven) for the research user in your SHM.
 Do not register this user with any SRE yet.
 
 ## 1. Multifactor authentication and password strength
diff --git a/docs/source/deployment/security_checklist/security_checklist_template.md b/docs/source/deployment/security_checklist/security_checklist_template.md
new file mode 100644
index 0000000000..6233e56f5c
--- /dev/null
+++ b/docs/source/deployment/security_checklist/security_checklist_template.md
@@ -0,0 +1,136 @@
+# Security checklist
+
+Running on SHM/SREs deployed using commit xxxxxx
+
+## Summary
+
+- :white_check_mark: x tests passed
+- :partly_sunny: x tests partially passed (see below for more details)
+- :fast_forward: x tests skipped (see below for more details)
+- :x: x tests failed (see below for more details)
+
+## Details
+
+- Any additional details as referred to in the summary
+
+### Multifactor authentication and password strength
+
+- :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Check: Users can reset their own password
+    - <summary><b>Verify that:</b> User can reset their own password</summary>
+    <img src="…"/>
+- :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Check: Non-registered users cannot connect to any SRE workspace
+    - <summary> <b>Verify that:</b> User can authenticate but cannot see any workspaces</summary>
+    <img src="…"/>
+- :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Check: Registered users can see SRE workspaces
+    - <summary> <b>Verify that:</b> User can authenticate and can see workspaces</summary>
+    <img src="…"/>
+- :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Check: Authenticated users can access workspaces
+    - <summary> <b>Verify that:</b> You can connect to any workspace</summary>
+    <img src="…"/>
+
+### Isolated network
+
+- :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Fail to connect to the internet from a workspace
+    - <summary> <b>Verify that:</b> Browsing to the service fails</summary>
+    <img src="…"/>
+    - <summary> <b>Verify that:</b> You cannot access the service using curl</summary>
+    <img src="…"/>
+    - <summary> <b>Verify that:</b> You cannot get the IP address for the service using nslookup</summary>
+    <img src="…"/>
+
+### User devices
+
+#### Tier 2:
+
+- Connect to the environment using an allowed IP address and credentials
+    - :white_check_mark:/:partly_sunny:/:fast_forward:/:x: <b>Verify that:</b> Connection succeeds
+- Connect to the environment from an IP address that is not allowed but with correct credentials
+    - :white_check_mark:/:partly_sunny:/:fast_forward:/:x: <b>Verify that:</b> Connection fails
+
+#### Tier 3:
+
+- All managed devices should be provided by a known IT team at an approved organisation.
+    - :white_check_mark:/:partly_sunny:/:fast_forward:/:x: <b>Verify that:</b> the IT team of the approved organisation take responsibility for managing the device.
+    - :white_check_mark:/:partly_sunny:/:fast_forward:/:x: <b>Verify that:</b> the user does not have administrator permissions on the device.
+    - :white_check_mark:/:partly_sunny:/:fast_forward:/:x: <b>Verify that:</b> allowed IP addresses are exclusive to managed devices.
+- Connect to the environment using an allowed IP address and credentials
+    - :white_check_mark:/:partly_sunny:/:fast_forward:/:x: <b>Verify that:</b> Connection succeeds
+- Connect to the environment from an IP address that is not allowed but with correct credentials
+    - :white_check_mark:/:partly_sunny:/:fast_forward:/:x: <b>Verify that:</b> Connection fails
+
+#### Tiers 2 and above:
+
+- :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Network rules permit access only from allow-listed IP addresses
+    - In the Azure portal, navigate to the Guacamole application gateway NSG for this SRE: `shm-<SHM NAME>-sre-<SRE NAME>-nsg-application-gateway`
+    - <summary> <b>Verify that:</b> the NSG has network rules allowing Inbound access from allowed IP addresses only</summary>
+    <img src="…"/>
+- :white_check_mark:/:partly_sunny:/:fast_forward:/:x: All other NSGs have an inbound Deny All rule and no higher priority rule allowing inbound connections from outside the Virtual Network
+
+### Physical security
+
+#### Tier 3 only
+
+- Attempt to connect to the Tier 3 SRE web client from home using a managed device and the correct VPN connection and credentials.
+    - :white_check_mark:/:partly_sunny:/:fast_forward:/:x: <b>Verify that:</b> connection fails
+- Attempt to connect from a research office using a managed device and the correct VPN connection and credentials.
+    - :white_check_mark:/:partly_sunny:/:fast_forward:/:x: <b>Verify that:</b> connection succeeds
+    - :white_check_mark:/:partly_sunny:/:fast_forward:/:x: <b>Verify that:</b> the network IP ranges of the research spaces match those allowed by the storage account firewall
+    - :white_check_mark:/:partly_sunny:/:fast_forward:/:x: <b>Verify that:</b> physical measures such as screen adaptations or desk partitions are present if the risk of visual eavesdropping is high
+
+### Remote connections
+
+- :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Unable to connect as a user to the remote desktop server via SSH
+    - <summary> <b>Verify that:</b> SSH login by fully-qualified domain name fails</summary>
+    <img src="…"/>
+    - <summary> <b>Verify that:</b> SSH login by public IP address fails</summary>
+    <img src="…"/>
+
+- :white_check_mark:/:partly_sunny:/:fast_forward:/:x: <b>Verify that:</b> the remote desktop web client application gateway (shm-<SHM ID>-sre-<SRE ID>-ag-entrypoint) and the firewall are the only SRE resources with public IP addresses.
+
+### Copy-and-paste
+
+- Unable to paste text from a local device into a workspace
+    - :white_check_mark:/:partly_sunny:/:fast_forward:/:x: <b>Verify that:</b> pasting into the workspace fails
+- Unable to copy text from a workspace to a local device
+    - :white_check_mark:/:partly_sunny:/:fast_forward:/:x: <b>Verify that:</b> pasting into the local device fails
+
+### Data ingress
+
+- Check that the **System Manager** can send an upload token to the **Dataset Provider Representative**
+    - :white_check_mark:/:partly_sunny:/:fast_forward:/:x: <b>Verify that:</b> the upload token is successfully created.
+    - :white_check_mark:/:partly_sunny:/:fast_forward:/:x: <b>Verify that:</b> you are able to send this token using a secure mechanism.
+- Ensure that data ingress works only for connections from the accepted IP address range
+    - :white_check_mark:/:partly_sunny:/:fast_forward:/:x: <b>Verify that:</b> writing succeeds by uploading a file
+    - :white_check_mark:/:partly_sunny:/:fast_forward:/:x: <b>Verify that:</b> attempting to open or download any of the files results in the following error: "Failed to start transfer: Insufficient credentials" under the Activities pane at the bottom of the MS Azure Storage Explorer window.
+    - :white_check_mark:/:partly_sunny:/:fast_forward:/:x: <b>Verify that:</b> the access token fails when using a device with a non-allowed IP address
+- Check that the upload fails if the token has expired
+    - :white_check_mark:/:partly_sunny:/:fast_forward:/:x: <b>Verify that:</b> you can connect and write with the token while it is valid
+    - :white_check_mark:/:partly_sunny:/:fast_forward:/:x: <b>Verify that:</b> you cannot connect and write with the token after it has expired
+    - :white_check_mark:/:partly_sunny:/:fast_forward:/:x: <b>Verify that:</b> the data ingress process works by uploading different kinds of files, e.g. data, images, scripts (if appropriate)
+
+### Data egress
+
+- Confirm that a non-privileged user is able to read the different storage volumes and write to output
+    - :white_check_mark:/:partly_sunny:/:fast_forward:/:x: <b>Verify that:</b> the `/mnt/output` volume exists and can be written to
+    - :white_check_mark:/:partly_sunny:/:fast_forward:/:x: <b>Verify that:</b> the permissions of other storage volumes match that described in the user guide
+- Confirm that <b>System Manager</b> can see and download files from output
+    - :white_check_mark:/:partly_sunny:/:fast_forward:/:x: <b>Verify that:</b> you can see the files written to the `/mnt/output` storage volume.
+    - :white_check_mark:/:partly_sunny:/:fast_forward:/:x: <b>Verify that:</b> a written file can be taken out of the environment via download
+
+### Software package repositories
+
+#### Tier 2:
+
+- :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Can install any packages
+    - <summary> <b>Verify that:</b> pytz can be installed</summary>
+    <img src="…"/>
+    - <summary> <b>Verify that:</b> awscli can be installed</summary>
+    <img src="…"/>
+
+#### Tier 3:
+
+- :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Can install only allow-listed packages
+    - <summary> <b>Verify that:</b> pytz can be installed</summary>
+    <img src="…"/>
+    - <summary> <b>Verify that:</b> awscli cannot be installed</summary>
+    <img src="…"/>
diff --git a/docs/source/management/data.md b/docs/source/management/data.md
new file mode 100644
index 0000000000..9cacaf3806
--- /dev/null
+++ b/docs/source/management/data.md
@@ -0,0 +1,72 @@
+# Managing data ingress and egress
+
+## Data ingress
+
+It is the {ref}`role_data_provider_representative`'s responsibility to upload the data required by the safe haven.
+
+The following steps show how to generate a temporary, write-only upload token that can be securely sent to the {ref}`role_data_provider_representative`, enabling them to upload the data:
+
+- In the Azure portal select **Subscriptions** then navigate to the subscription containing the relevant SHM
+- Search for the resource group: `shm-<YOUR_SHM_NAME>-sre-<YOUR_SRE_NAME>-rg`, then click through to the storage account ending with `sensitivedata`
+- Browse to **{menuselection}`Settings --> Networking`** and ensure that the data provider's IP address is one of those allowed under the **Firewall** header
+    - If it is not listed, modify and reupload the SRE configuration and redeploy the SRE using the `dsh` CLI, as per {ref}`deploy_sre`
+- Browse to **{menuselection}`Data storage --> Containers`** from the menu on the left hand side
+- Click **ingress**
+- Browse to **{menuselection}`Settings --> Shared access tokens`** and do the following:
+    - Under **Signing method**, select **User delegation key**
+    - Under **Permissions**, check these boxes:
+        - **Write**
+        - **List**
+    - Set a 24 hour time window in the **Start and expiry date/time** (or an appropriate length of time)
+    - Leave everything else as default and click **{guilabel}`Generate SAS token and URL`**
+    - Copy the **Blob SAS URL**
+
+      ```{image} ingress_token_write_only.png
+      :alt: write-only SAS token
+      :align: center
+      ```
+
+- Send the **Blob SAS URL** to the data provider through a secure channel
+- The data provider should now be able to upload data
+- Validate successful data ingress
+    - Browse to **{menuselection}`Data storage --> Containers`** (in the middle of the page)
+    - Select the **ingress** container and ensure that the uploaded files are present
+
+## Data egress
+
+```{important}
+Assessment of output must be completed **before** an egress link is created.
+Outputs are potentially sensitive, and so an appropriate process must be applied to ensure that they are suitable for egress.
+```
+
+The {ref}`role_system_manager` creates a time-limited and IP-restricted link to remove data from the environment.
+
+- In the Azure portal select **Subscriptions** then navigate to the subscription containing the relevant SHM
+- Search for the resource group: `shm-<YOUR_SHM_NAME>-sre-<YOUR_SRE_NAME>-rg`, then click through to the storage account ending with `sensitivedata`
+- Browse to **{menuselection}`Settings --> Networking`** and check the list of pre-approved IP addresses allowed under the **Firewall** header
+    - Ensure that the IP address of the person to receive the outputs is listed
+    - If it is not listed, modify and reupload the SRE configuration and redeploy the SRE using the `dsh` CLI, as per {ref}`deploy_sre`
+- Browse to **{menuselection}`Data storage --> Containers`**
+- Select the **egress** container
+- Browse to **{menuselection}`Settings --> Shared access tokens`** and do the following:
+    - Under **Signing method**, select **User delegation key**
+    - Under **Permissions**, check these boxes:
+        - **Read**
+        - **List**
+    - Set a time window in the **Start and expiry date/time** that gives enough time for the person who will perform the secure egress download to do so
+    - Leave everything else as default and press **{guilabel}`Generate SAS token and URL`**
+    - Copy the **Blob SAS URL**
+
+      ```{image} egress_token_read_only.png
+      :alt: Read-only SAS token
+      :align: center
+      ```
+
+- Send the **Blob SAS URL** to the relevant person through a secure channel
+- The appropriate person should now be able to download data
+
+## The output volume
+
+Once you have set up the egress connection in Azure Storage Explorer, you should be able to view data from the **output volume**, a read-write area intended for the extraction of results, such as figures for publication.
+On the workspaces, this volume is `/mnt/output` and is shared between all workspaces in an SRE.
+For more information on shared SRE storage volumes, consult the {ref}`Safe Haven User Guide <role_researcher_shared_storage>`.
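The portal steps above can also be scripted. A hedged sketch using the `azure-storage-blob` and `azure-identity` packages to mint the write-only, time-limited ingress token (the account URL is a placeholder, and the caller needs a role permitting user delegation keys, such as Storage Blob Data Contributor):

```python
# Generate a 24-hour, write+list user-delegation SAS URL for the ingress container.
from datetime import datetime, timedelta, timezone

from azure.identity import DefaultAzureCredential
from azure.storage.blob import (
    BlobServiceClient,
    ContainerSasPermissions,
    generate_container_sas,
)

account_url = "https://<storage-account>.blob.core.windows.net"  # placeholder
client = BlobServiceClient(account_url, credential=DefaultAzureCredential())

start = datetime.now(timezone.utc)
expiry = start + timedelta(hours=24)
delegation_key = client.get_user_delegation_key(start, expiry)

sas_token = generate_container_sas(
    account_name=client.account_name,
    container_name="ingress",
    user_delegation_key=delegation_key,
    permission=ContainerSasPermissions(write=True, list=True),
    start=start,
    expiry=expiry,
)
print(f"{account_url}/ingress?{sas_token}")
```

The same approach with `ContainerSasPermissions(read=True, list=True)` against the **egress** container would produce the read-only egress link described below.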
diff --git a/docs/source/management/index.md b/docs/source/management/index.md
index 234fe8136a..f8cd8ac0e0 100644
--- a/docs/source/management/index.md
+++ b/docs/source/management/index.md
@@ -1,252 +1,12 @@
 # Management
 
-## Managing users
+:::{toctree}
+:hidden:
 
-### Add users to the Data Safe Haven
-
-:::{important}
-You will need a full name, phone number, email address and country for each user.
-:::
-
-1. You can add users directly in your Entra tenant, following the instructions [here](https://learn.microsoft.com/en-us/entra/fundamentals/how-to-create-delete-users).
-
-2. Alternatively, you can add multiple users from a CSV file with columns named (`GivenName`, `Surname`, `Phone`, `Email`, `CountryCode`).
-    - (Optional) you can provide a `Domain` column if you like but this will otherwise default to the domain of your SHM
-    - {{warning}} **Phone** must be in [E.123 international format](https://en.wikipedia.org/wiki/E.123)
-    - {{warning}} **CountryCode** is the two letter [ISO 3166-1 Alpha-2](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2#Officially_assigned_code_elements) code for the country where the user is based
-
-::::{admonition} Example CSV user file
-:class: dropdown tip
-
-:::{code} text
-GivenName;Surname;Phone;Email;CountryCode
-Ada;Lovelace;+44800456456;ada@lovelace.me;GB
-Grace;Hopper;+18005550100;grace@nasa.gov;US
-:::
-::::
-
-```{code} shell
-$ dsh users add PATH_TO_MY_CSV_FILE
-```
-
-### List available users
-
-- You can do this from the [Microsoft Entra admin centre](https://entra.microsoft.com/)
-
-    1. Browse to **{menuselection}`Groups --> All Groups`**
-    2. Click on the group named **Data Safe Haven SRE _YOUR\_SRE\_NAME_ Users**
-    3. Browse to **{menuselection}`Manage --> Members`** from the secondary menu on the left side
-
-- You can do this at the command line by running the following command:
-
-    ```{code} shell
-    $ dsh users list YOUR_SRE_NAME
-    ```
-
-    which will give output like the following
-
-    ```
-    ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┓
-    ┃ username                     ┃ Entra ID ┃ SRE YOUR_SRE_NAME ┃
-    ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━┩
-    │ ada.lovelace                 │ x        │ x                 │
-    │ grace.hopper                 │ x        │ x                 │
-    │ ursula.franklin              │ x        │                   │
-    │ joan.clarke                  │ x        │                   │
-    └──────────────────────────────┴──────────┴───────────────────┘
-    ```
-
-### Assign existing users to an SRE
-
-1. You can do this directly in your Entra tenant by adding them to the **Data Safe Haven SRE _YOUR\_SRE\_NAME_ Users** group, following the instructions [here](https://learn.microsoft.com/en-us/entra/fundamentals/groups-view-azure-portal#add-a-group-member).
-
-2. Alternatively, you can add multiple users from the command line:
-
-    ```{code} shell
-    $ dsh users register YOUR_SRE_NAME -u USERNAME_1 -u USERNAME_2
-    ```
-
-    where you must specify the usernames for each user you want to add to this SRE.
-
-    :::{important}
-    Do not include the Entra ID domain part of the username, just the part before the @.
-    :::
-
-### Manually register users for self-service password reset
-
-:::{tip}
-Users created via the `dsh users` command line tool will be automatically registered for SSPR.
+user.md
+sre.md
+data.md
+logs.md
 :::
 
-If you have manually created a user and want to enable SSPR, do the following
-
-- Go to the [Microsoft Entra admin centre](https://entra.microsoft.com/)
-- Browse to **{menuselection}`Users --> All Users`**
-- Select the user you want to enable SSPR for
-- On the **{menuselection}`Manage --> Authentication Methods`** page fill out their contact info as follows:
-    - Ensure that you register **both** a phone number and an email address
-        - **Phone:** add the user's phone number with a space between the country code and the rest of the number (_e.g._ +44 7700900000)
-        - **Email:** enter the user's email address here
-    - Click the **{guilabel}`Save`** icon in the top panel
-
-## Managing SREs
-
-### List available SRE configurations and deployment status
-
-- Run the following if you want to check what SRE configurations are available in the current context, and whether those SREs are deployed
-
-```{code} shell
-$ dsh config available
-```
-
-which will give output like the following
-
-```{code} shell
-Available SRE configurations for context 'green':
-┏━━━━━━━━━━━━━━┳━━━━━━━━━━┓
-┃ SRE Name     ┃ Deployed ┃
-┡━━━━━━━━━━━━━━╇━━━━━━━━━━┩
-│ emerald      │ x        │
-│ jade         │          │
-│ olive        │          │
-└──────────────┴──────────┘
-```
-
-### Remove a deployed Data Safe Haven
-
-- Run the following if you want to teardown a deployed SRE:
-
-```{code} shell
-$ dsh sre teardown YOUR_SRE_NAME
-```
-
-::::{admonition} Tearing down an SRE is destructive and irreversible
-:class: danger
-Running `dsh sre teardown` will destroy **all** resources deployed within the SRE.
-Ensure that any desired outputs have been extracted before deleting the SRE.
-**All** data remaining on the SRE will be deleted.
-The user groups for the SRE on Microsoft Entra ID will also be deleted.
-::::
-
-- Run the following if you want to teardown the deployed SHM:
-
-```{code} shell
-$ dsh shm teardown
-```
-
-::::{admonition} Tearing down an SHM
-:class: warning
-Tearing down the SHM permanently deletes **all** remotely stored configuration and state data.
-Tearing down the SHM also renders the SREs inaccessible to users and prevents them from being fully managed using the CLI.
-All SREs associated with the SHM should be torn down before the SHM is torn down.
-::::
-
-### Updating SREs
-
-SREs are modified by updating the configuration then running the deploy command.
-
-- The existing configuration for the SRE can be shown using the following:
-
-```{code} shell
-$ dsh config show YOUR_SRE_NAME
-```
-
-- If you do not have a local copy, you can write one with the `--file` option:
-
-```{code} shell
-$ dsh config show YOUR_SRE_NAME --file YOUR_SRE_NAME.yaml
-```
-
-- Edit the configuration file locally, and upload the new version:
-
-```{code} shell
-$ dsh config upload YOUR_SRE_NAME.yaml
-```
-
-- You will be shown the differences between the existing configuration and the new configuration and asked to confirm that they are correct.
-- Finally, deploy your SRE to apply any changes:
-
-```{code} shell
-$ dsh sre deploy YOUR_SRE_NAME
-```
-
-::::{admonition} Changing administrator IP addresses
-:class: warning
-The administrator IP addresses declared in the SRE configuration are used to create access rules for SRE infrastructure.
-Therefore, after an SRE has been deployed, some changes can only be made from IP addresses on that list.
-
-As a consequence, if you want to update the list of administrator IP addresses, for example to add a new administrator, you must do so from an IP address that is already allowed.
-::::
-
-## Managing data ingress and egress
-
-### Data Ingress
-
-It is the {ref}`role_data_provider_representative`'s responsibility to upload the data required by the safe haven.
-
-The following steps show how to generate a temporary, write-only upload token that can be securely sent to the {ref}`role_data_provider_representative`, enabling them to upload the data:
-
-- In the Azure portal select **Subscriptions** then navigate to the subscription containing the relevant SHM
-- Search for the resource group: `shm-<YOUR_SHM_NAME>-sre-<YOUR_SRE_NAME>-rg`, then click through to the storage account ending with `sensitivedata`
-- Browse to **{menuselection}`Settings --> Networking`** and ensure that the data provider's IP address is one of those allowed under the **Firewall** header
-    - If it is not listed, modify and reupload the SRE configuration and redeploy the SRE using the `dsh` CLI, as per {ref}`deploy_sre`
-- Browse to **{menuselection}`Data storage --> Containers`** from the menu on the left hand side
-- Click **ingress**
-- Browse to **{menuselection}`Settings --> Shared access tokens`** and do the following:
-    - Under **Signing method**, select **User delegation key**
-    - Under **Permissions**, check these boxes:
-        - **Write**
-        - **List**
-    - Set a 24 hour time window in the **Start and expiry date/time** (or an appropriate length of time)
-    - Leave everything else as default and click **{guilabel}`Generate SAS token and URL`**
-    - Copy the **Blob SAS URL**
-
-      ```{image} ingress_token_write_only.png
-      :alt: write-only SAS token
-      :align: center
-      ```
-
-- Send the **Blob SAS URL** to the data provider through a secure channel
-- The data provider should now be able to upload data
-- Validate successful data ingress
-    - Browse to **{menuselection}`Data storage --> Containers`** (in the middle of the page)
-    - Select the **ingress** container and ensure that the uploaded files are present
-
-### Data egress
-
-```{important}
-Assessment of output must be completed **before** an egress link is created.
-Outputs are potentially sensitive, and so an appropriate process must be applied to ensure that they are suitable for egress.
-```
-
-The {ref}`role_system_manager` creates a time-limited and IP restricted link to remove data from the environment.
-
-- In the Azure portal select **Subscriptions** then navigate to the subscription containing the relevant SHM
-- Search for the resource group: `shm-<YOUR_SHM_NAME>-sre-<YOUR_SRE_NAME>-rg`, then click through to the storage account ending with `sensitivedata`
-- Browse to **{menuselection}`Settings --> Networking`** and check the list of pre-approved IP addresses allowed under the **Firewall** header
-    - Ensure that the IP address of the person to receive the outputs is listed
-    - If it is not listed, modify and reupload the SRE configuration and redeploy the SRE using the `dsh` CLI, as per {ref}`deploy_sre`
-- Browse to **{menuselection}`Data storage --> Containers`**
-- Select the **egress** container
-- Browse to **{menuselection}`Settings --> Shared access tokens`** and do the following:
-    - Under **Signing method**, select **User delegation key**
-    - Under **Permissions**, check these boxes:
-        - **Read**
-        - **List**
-    - Set a time window in the **Start and expiry date/time** that gives enough time for the person who will perform the secure egress download to do so
-    - Leave everything else as default and press **{guilabel}`Generate SAS token and URL`**
-    - Copy the **Blob SAS URL**
-
-      ```{image} egress_token_read_only.png
-      :alt: Read-only SAS token
-      :align: center
-      ```
-
-- Send the **Blob SAS URL** to the relevant person through a secure channel
-- The appropriate person should now be able to download data
-
-### The output volume
-
-Once you have set up the egress connection in Azure Storage Explorer, you should be able to view data from the **output volume**, a read-write area intended for the extraction of results, such as figures for publication.
-On the workspaces, this volume is `/mnt/output` and is shared between all workspaces in an SRE.
-For more information on shared SRE storage volumes, consult the {ref}`Safe Haven User Guide <role_researcher_shared_storage>`.
+Running a secure and productive Data Safe Haven requires a manager to carry out tasks that support users and to monitor that the TRE is operating correctly.
diff --git a/docs/source/management/logs.md b/docs/source/management/logs.md
new file mode 100644
index 0000000000..10b9bfb0e5
--- /dev/null
+++ b/docs/source/management/logs.md
@@ -0,0 +1,128 @@
+# Monitoring logs
+
+Logs are collected from numerous parts of a Data Safe Haven.
+Some of these logs are ingested into a central location, an Azure [Log Analytics Workspace](https://learn.microsoft.com/azure/azure-monitor/logs/log-analytics-workspace-overview), and others are stored separately.
+
+## Log workspace
+
+Each SRE has its own Log Analytics Workspace.
+You can view the workspaces by going to the Azure portal and navigating to [Log Analytics Workspaces](https://portal.azure.com/#browse/Microsoft.OperationalInsights%2Fworkspaces).
+Select which Log Analytics Workspace you want to view by clicking on the workspace named `shm-<YOUR_SHM_NAME>-sre-<YOUR_SRE_NAME>-log`.
+
+The logs can be filtered using [Kusto Query Language (KQL)](https://learn.microsoft.com/en-us/azure/azure-monitor/logs/log-query-overview).
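+
+For example, the following sketch runs a query from the command line with the Azure CLI (assuming the `log-analytics` CLI extension is installed; `YOUR_WORKSPACE_ID` stands in for the workspace ID shown on the workspace overview page, and the `Heartbeat` table is described under [Workspace logs](#workspace-logs) below):
+
+```{code} shell
+$ az monitor log-analytics query \
+    --workspace YOUR_WORKSPACE_ID \
+    --analytics-query "Heartbeat | where TimeGenerated > ago(1h) | summarize count() by Computer" \
+    --output table
+```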
+
+## Storage logs
+
+The logs collected differ depending on how each part of Data Safe Haven storage is provisioned.
+
+### Sensitive data logs
+
+The sensitive data containers are the [ingress and egress containers](./data.md).
+Logs from these containers are ingested into the [SRE's log analytics workspace](#log-workspace).
+There are two tables:
+
+`StorageBlobLogs`
+: Events occurring on the blob containers.
+: For example, data being uploaded, extracted or read.
+
+`AzureMetrics`
+: Various metrics on blob container utilisation and performance.
+: This table is not reserved for the sensitive data containers and other resources may log to it.
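+
+For example, you could list the last day of blob events with a query along the following lines (run here via the Azure CLI as in [Log workspace](#log-workspace); the projected columns come from the standard `StorageBlobLogs` schema):
+
+```{code} shell
+$ az monitor log-analytics query \
+    --workspace YOUR_WORKSPACE_ID \
+    --analytics-query "StorageBlobLogs | where TimeGenerated > ago(1d) | project TimeGenerated, OperationName, Uri, CallerIpAddress" \
+    --output table
+```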
+
+### Desired state data logs
+
+The desired state container holds the data necessary to configure virtual machines in an SRE.
+Logs from the desired state container are ingested into the [SRE's log analytics workspace](#log-workspace).
+There are two tables:
+
+`StorageBlobLogs`
+: Events occurring on the blob containers.
+: For example, data being uploaded, extracted or read.
+
+`AzureMetrics`
+: Various metrics on blob container utilisation and performance.
+: This table is not reserved for the desired state data container and other resources may log to it.
+
+### User data logs
+
+The user data file share holds the {ref}`researchers'<role_researcher>` [home directories](https://refspecs.linuxfoundation.org/FHS_3.0/fhs/ch03s08.html), where they will store their personal data and configuration.
+Logs from the share are ingested into the [SRE's log analytics workspace](#log-workspace).
+There are two tables:
+
+`StorageFileLogs`
+: NFS events occurring on the file share.
+: For example, data being written or directories being accessed.
+
+`AzureMetrics`
+: Various metrics on file share utilisation and performance.
+: This table is not reserved for the user data share and other resources may log to it.
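+
+For example, recent activity on the user data share could be inspected with something like the following (the projected columns come from the standard `StorageFileLogs` schema):
+
+```{code} shell
+$ az monitor log-analytics query \
+    --workspace YOUR_WORKSPACE_ID \
+    --analytics-query "StorageFileLogs | where TimeGenerated > ago(1d) | project TimeGenerated, OperationName, Uri" \
+    --output table
+```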
+
+### Configuration data logs
+
+There are multiple configuration data file shares.
+Each contains the configuration and state data for the Data Safe Haven [services deployed as containers](#container-logs).
+Logs from the share are ingested into the [SRE's log analytics workspace](#log-workspace).
+There are two tables:
+
+`StorageFileLogs`
+: SMB events occurring on the file share.
+: For example, data being written or directories being accessed.
+
+`AzureMetrics`
+: Various metrics on file share utilisation and performance.
+: This table is not reserved for the configuration data shares and other resources may log to it.
+
+## Container logs
+
+Some of the Data Safe Haven infrastructure is provisioned as containers.
+These include:
+
+- remote desktop portal
+- package proxy
+- Gitea and Hedgedoc
+
+Logs from all containers are ingested into the [SRE's log analytics workspace](#log-workspace).
+There are two tables:
+
+`ContainerEvents_CL`
+: Event logs for the container instance resources, such as starting, stopping, crashing and pulling images.
+
+`ContainerInstanceLog_CL`
+: Container process logs.
+: This is where you can view the output of the containerised applications and will be useful for debugging problems.
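+
+For example, recent output from the containerised applications could be viewed with something like the following (`Message` is an assumption based on the usual custom-log schema, so adjust if your table differs):
+
+```{code} shell
+$ az monitor log-analytics query \
+    --workspace YOUR_WORKSPACE_ID \
+    --analytics-query "ContainerInstanceLog_CL | where TimeGenerated > ago(1h) | project TimeGenerated, Message | order by TimeGenerated desc" \
+    --output table
+```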
+
+## Workspace logs
+
+Logs from all user workspaces are ingested into the [SRE's log analytics workspace](#log-workspace) using the [Azure Monitor Agent](https://learn.microsoft.com/en-us/azure/azure-monitor/agents/azure-monitor-agent-overview).
+
+There are three tables:
+
+`Perf`
+: Usage statistics for individual workspaces, such as percent memory used and percent disk space used.
+
+`Syslog`
+: [syslog](https://www.paessler.com/it-explained/syslog) events from workspaces.
+: Syslog is the _de facto_ standard protocol for logging on Linux, and most applications will log to it.
+: These logs will be useful for debugging problems with the workspace or workspace software.
+
+`Heartbeat`
+: Verification that the Azure Monitor Agent is present on the workspaces and is able to connect to the [log analytics workspace](#log-workspace).
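+
+For example, recent error-level syslog events could be surfaced with a query like the following (`err` is the standard syslog severity keyword):
+
+```{code} shell
+$ az monitor log-analytics query \
+    --workspace YOUR_WORKSPACE_ID \
+    --analytics-query "Syslog | where SeverityLevel == 'err' | project TimeGenerated, Computer, ProcessName, SyslogMessage" \
+    --output table
+```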
+
+## Firewall logs
+
+The firewall plays a critical role in the security of a Data Safe Haven.
+It filters all outbound traffic through a set of FQDN rules so that each component may only reach necessary and allowed domains.
+
+Logs from the firewall are ingested into the [SRE's log analytics workspace](#log-workspace).
+There are three tables:
+
+`AZFWApplicationRule`
+: Logs from the firewall's FQDN filters.
+: Shows requests to domains outside the Data Safe Haven and why they were approved or rejected.
+
+`AZFWDnsQuery`
+: DNS requests handled by the firewall.
+
+`AzureMetrics`
+: Various metrics on firewall utilisation and performance.
+: This table is not reserved for the firewall and other resources may log to it.
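+
+For example, recently rejected outbound requests could be reviewed with a query along these lines (column names follow the published `AZFWApplicationRule` schema, but treat them as indicative):
+
+```{code} shell
+$ az monitor log-analytics query \
+    --workspace YOUR_WORKSPACE_ID \
+    --analytics-query "AZFWApplicationRule | where Action == 'Deny' | project TimeGenerated, SourceIp, Fqdn, Action" \
+    --output table
+```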
diff --git a/docs/source/management/sre.md b/docs/source/management/sre.md
new file mode 100644
index 0000000000..aab94c31a1
--- /dev/null
+++ b/docs/source/management/sre.md
@@ -0,0 +1,88 @@
+# Managing SREs
+
+## List available SRE configurations and deployment status
+
+- Run the following if you want to check which SRE configurations are available in the current context, and whether those SREs are deployed:
+
+```{code} shell
+$ dsh config available
+```
+
+which will give output like the following:
+
+```{code} shell
+Available SRE configurations for context 'green':
+┏━━━━━━━━━━━━━━┳━━━━━━━━━━┓
+┃ SRE Name     ┃ Deployed ┃
+┡━━━━━━━━━━━━━━╇━━━━━━━━━━┩
+│ emerald      │ x        │
+│ jade         │          │
+│ olive        │          │
+└──────────────┴──────────┘
+```
+
+## Remove a deployed Data Safe Haven
+
+- Run the following if you want to tear down a deployed SRE:
+
+```{code} shell
+$ dsh sre teardown YOUR_SRE_NAME
+```
+
+::::{admonition} Tearing down an SRE is destructive and irreversible
+:class: danger
+Running `dsh sre teardown` will destroy **all** resources deployed within the SRE.
+Ensure that any desired outputs have been extracted before deleting the SRE.
+**All** data remaining on the SRE will be deleted.
+The user groups for the SRE on Microsoft Entra ID will also be deleted.
+::::
+
+- Run the following if you want to tear down the deployed SHM:
+
+```{code} shell
+$ dsh shm teardown
+```
+
+::::{admonition} Tearing down an SHM
+:class: warning
+Tearing down the SHM permanently deletes **all** remotely stored configuration and state data.
+Tearing down the SHM also renders the SREs inaccessible to users and prevents them from being fully managed using the CLI.
+All SREs associated with the SHM should be torn down before the SHM is torn down.
+::::
+
+## Updating SREs
+
+SREs are modified by updating the configuration and then running the deploy command.
+
+- The existing configuration for the SRE can be shown using the following:
+
+```{code} shell
+$ dsh config show YOUR_SRE_NAME
+```
+
+- If you do not have a local copy, you can write one with the `--file` option:
+
+```{code} shell
+$ dsh config show YOUR_SRE_NAME --file YOUR_SRE_NAME.yaml
+```
+
+- Edit the configuration file locally, and upload the new version:
+
+```{code} shell
+$ dsh config upload YOUR_SRE_NAME.yaml
+```
+
+- You will be shown the differences between the existing configuration and the new configuration and asked to confirm that they are correct.
+- Finally, deploy your SRE to apply any changes:
+
+```{code} shell
+$ dsh sre deploy YOUR_SRE_NAME
+```
+
+::::{admonition} Changing administrator IP addresses
+:class: warning
+The administrator IP addresses declared in the SRE configuration are used to create access rules for SRE infrastructure.
+Therefore, after an SRE has been deployed, some changes can only be made from IP addresses on that list.
+
+As a consequence, if you want to update the list of administrator IP addresses, for example to add a new administrator, you must do so from an IP address that is already allowed.
+::::
diff --git a/docs/source/management/user.md b/docs/source/management/user.md
new file mode 100644
index 0000000000..d996321162
--- /dev/null
+++ b/docs/source/management/user.md
@@ -0,0 +1,84 @@
+# Managing users
+
+## Add users to the Data Safe Haven
+
+:::{important}
+You will need a full name, phone number, email address and country for each user.
+:::
+
+1. You can add users directly in your Entra tenant, following the instructions [here](https://learn.microsoft.com/en-us/entra/fundamentals/how-to-create-delete-users).
+1. Alternatively, you can add multiple users from a CSV file with columns named (`GivenName`, `Surname`, `Phone`, `Email`, `CountryCode`).
+    - (Optional) you can provide a `Domain` column; if omitted, it will default to the domain of your SHM
+    - {{warning}} **Phone** must be in [E.123 international format](https://en.wikipedia.org/wiki/E.123)
+    - {{warning}} **CountryCode** is the two letter [ISO 3166-1 Alpha-2](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2#Officially_assigned_code_elements) code for the country where the user is based
+
+::::{admonition} Example CSV user file
+:class: dropdown tip
+
+:::{code} text
+GivenName;Surname;Phone;Email;CountryCode
+Ada;Lovelace;+44800456456;ada@lovelace.me;GB
+Grace;Hopper;+18005550100;grace@nasa.gov;US
+:::
+::::
+
+Once your CSV file is ready, add the users with:
+
+```{code} shell
+$ dsh users add PATH_TO_MY_CSV_FILE
+```
+
+## List available users
+
+- You can do this from the [Microsoft Entra admin centre](https://entra.microsoft.com/)
+    1. Browse to **{menuselection}`Groups --> All Groups`**
+    1. Click on the group named **Data Safe Haven SRE _YOUR\_SRE\_NAME_ Users**
+    1. Browse to **{menuselection}`Manage --> Members`** from the secondary menu on the left side
+- You can do this at the command line by running the following command:
+
+    ```{code} shell
+    $ dsh users list YOUR_SRE_NAME
+    ```
+
+    which will give output like the following:
+
+    ```
+    ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┓
+    ┃ username                     ┃ Entra ID ┃ SRE YOUR_SRE_NAME ┃
+    ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━┩
+    │ ada.lovelace                 │ x        │ x                 │
+    │ grace.hopper                 │ x        │ x                 │
+    │ ursula.franklin              │ x        │                   │
+    │ joan.clarke                  │ x        │                   │
+    └──────────────────────────────┴──────────┴───────────────────┘
+    ```
+
+## Assign existing users to an SRE
+
+1. You can do this directly in your Entra tenant by adding them to the **Data Safe Haven SRE _YOUR\_SRE\_NAME_ Users** group, following the instructions [here](https://learn.microsoft.com/en-us/entra/fundamentals/groups-view-azure-portal#add-a-group-member).
+1. Alternatively, you can add multiple users from the command line:
+
+    ```{code} shell
+    $ dsh users register YOUR_SRE_NAME -u USERNAME_1 -u USERNAME_2
+    ```
+
+    where you must specify the usernames for each user you want to add to this SRE.
+
+    :::{important}
+    Do not include the Entra ID domain part of the username, just the part before the @.
+    :::
+
+## Manually register users for self-service password reset
+
+:::{tip}
+Users created via the `dsh users` command line tool will be automatically registered for self-service password reset (SSPR).
+:::
+
+If you have manually created a user and want to enable SSPR, do the following:
+
+- Go to the [Microsoft Entra admin centre](https://entra.microsoft.com/)
+- Browse to **{menuselection}`Users --> All Users`**
+- Select the user you want to enable SSPR for
+- On the **{menuselection}`Manage --> Authentication Methods`** page, fill out their contact info as follows:
+    - Ensure that you register **both** a phone number and an email address
+        - **Phone:** add the user's phone number with a space between the country code and the rest of the number (_e.g._ +44 7700900000)
+        - **Email:** enter the user's email address here
+    - Click the **{guilabel}`Save`** icon in the top panel
diff --git a/docs/source/overview/sensitivity_tiers.md b/docs/source/overview/sensitivity_tiers.md
index 4aef9a32fe..995be6ab87 100644
--- a/docs/source/overview/sensitivity_tiers.md
+++ b/docs/source/overview/sensitivity_tiers.md
@@ -49,7 +49,7 @@ Non-technical restrictions related to information governance procedures may also
 - connections to the in-browser remote desktop can only be made from an agreed set of IP addresses
 - outbound connections to the internet from inside the environment are not possible
 - copy-and-paste between the environment and the user's device is not possible
-- access to all packages on PyPI and CRAN is made available through a proxy or mirror server
+- access to all packages on PyPI and CRAN is made available through a proxy server
 
 Non-technical restrictions related to information governance procedures may also be applied according to your organisation's needs.
 
@@ -63,7 +63,7 @@ At the Turing connections to Tier 2 environments are only permitted from **Organ
 
 **Tier 3** environments impose the following technical controls on top of what is required at {ref}`policy_tier_2`.
 
-- a partial replica of agreed PyPI and CRAN packages is made available through a proxy or mirror server
+- an agreed subset of PyPI and CRAN packages is made available through a proxy server
 
 Non-technical restrictions related to information governance procedures may also be applied according to your organisation's needs.
 
diff --git a/pyproject.toml b/pyproject.toml
index 9304f1bd32..dc9f8d41dc 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -42,20 +42,20 @@ dependencies = [
   "azure-storage-file-datalake==12.18.0",
   "azure-storage-file-share==12.20.0",
   "chevron==0.14.0",
-  "cryptography==43.0.3",
+  "cryptography==44.0.0",
   "fqdn==1.5.1",
   "psycopg[binary]==3.1.19", # needed for installation on older MacOS versions
-  "pulumi-azure-native==2.72.0",
+  "pulumi-azure-native==2.74.0",
   "pulumi-azuread==6.0.1",
   "pulumi-random==4.16.7",
-  "pulumi==3.139.0",
-  "pydantic==2.9.2",
-  "pyjwt[crypto]==2.10.0",
+  "pulumi==3.142.0",
+  "pydantic==2.10.2",
+  "pyjwt[crypto]==2.10.1",
   "pytz==2024.2",
   "pyyaml==6.0.2",
   "rich==13.9.4",
   "simple-acme-dns==3.2.0",
-  "typer==0.13.0",
+  "typer==0.14.0",
   "websocket-client==1.8.0",
 ]
 
@@ -73,13 +73,13 @@ docs = [
   "sphinx==8.1.3",
 ]
 lint = [
-  "ansible-dev-tools==24.10.2",
-  "ansible==10.6.0",
+  "ansible-dev-tools==24.11.0",
+  "ansible==11.0.0",
   "black==24.10.0",
   "mypy==1.13.0",
-  "pandas-stubs==2.2.3.241009",
-  "pydantic==2.9.2",
-  "ruff==0.7.4",
+  "pandas-stubs==2.2.3.241126",
+  "pydantic==2.10.2",
+  "ruff==0.8.1",
   "types-appdirs==1.4.3.5",
   "types-chevron==0.14.2.20240310",
   "types-pytz==2024.2.0.20241003",
@@ -87,10 +87,10 @@ lint = [
   "types-requests==2.32.0.20241016",
 ]
 test = [
-  "coverage==7.6.7",
+  "coverage==7.6.8",
   "freezegun==1.5.1",
   "pytest-mock==3.14.0",
-  "pytest==8.3.3",
+  "pytest==8.3.4",
   "requests-mock==1.12.1",
 ]
 
@@ -125,6 +125,7 @@ features = ["docs"]
 
 [tool.hatch.envs.docs.scripts]
 build = "sphinx-build -M html docs/source/ docs/build/ --fail-on-warning"
+clean = "rm -r docs/build"
 lint = "mdl --style .mdlstyle.rb ./docs/source"
 
 [tool.hatch.envs.lint]
@@ -158,7 +159,7 @@ pip-compile-constraint = "default"
 features = ["test"]
 
 [tool.hatch.envs.test.scripts]
-test = "coverage run -m pytest {args: tests}"
+test = "coverage run -m pytest {args:} ./tests"
 test-report = "coverage report {args:}"
 test-coverage = ["test", "test-report"]
 
diff --git a/tests/commands/conftest.py b/tests/commands/conftest.py
index d675398bfc..de60eb29d0 100644
--- a/tests/commands/conftest.py
+++ b/tests/commands/conftest.py
@@ -1,6 +1,8 @@
 from pytest import fixture
 from typer.testing import CliRunner
 
+from data_safe_haven.administration.users.entra_users import EntraUsers
+from data_safe_haven.administration.users.research_user import ResearchUser
 from data_safe_haven.config import (
     Context,
     ContextManager,
@@ -260,3 +262,14 @@ def tmp_contexts_none(tmp_path, context_yaml):
     with open(config_file_path, "w") as f:
         f.write(context_yaml)
     return tmp_path
+
+
+@fixture
+def mock_entra_user_list(mocker):
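+    # Patch EntraUsers.list with a single fixed user whose UPN domain
+    # (acme.testing) lets tests exercise the SRE domain-mismatch path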
+    test_user = ResearchUser(
+        given_name="Harry",
+        surname="Lime",
+        sam_account_name="harry.lime",
+        user_principal_name="harry.lime@acme.testing",
+    )
+    mocker.patch.object(EntraUsers, "list", return_value=[test_user])
diff --git a/tests/commands/test_sre.py b/tests/commands/test_sre.py
index a13518a878..9d2f79d07c 100644
--- a/tests/commands/test_sre.py
+++ b/tests/commands/test_sre.py
@@ -5,7 +5,7 @@
 from data_safe_haven.commands.sre import sre_command_group
 from data_safe_haven.config import Context, ContextManager
 from data_safe_haven.exceptions import DataSafeHavenAzureError
-from data_safe_haven.external import AzureSdk
+from data_safe_haven.external import AzureSdk, GraphApi
 
 
 class TestDeploySRE:
@@ -31,13 +31,17 @@ def test_no_application(
         self,
         caplog: LogCaptureFixture,
         runner: CliRunner,
+        mocker,
         mock_azuresdk_get_subscription_name,  # noqa: ARG002
         mock_contextmanager_assert_context,  # noqa: ARG002
         mock_ip_1_2_3_4,  # noqa: ARG002
         mock_pulumi_config_from_remote_or_create,  # noqa: ARG002
         mock_shm_config_from_remote,  # noqa: ARG002
         mock_sre_config_from_remote,  # noqa: ARG002
+        mock_graphapi_get_credential,  # noqa: ARG002
     ) -> None:
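+        # Simulate the Entra application being absent so that deployment fails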
+        mocker.patch.object(GraphApi, "get_application_by_name", return_value=None)
+
         result = runner.invoke(sre_command_group, ["deploy", "sandbox"])
         assert result.exit_code == 1
         assert (
diff --git a/tests/commands/test_users.py b/tests/commands/test_users.py
index c1b183c922..5c11e29cc9 100644
--- a/tests/commands/test_users.py
+++ b/tests/commands/test_users.py
@@ -52,6 +52,26 @@ def test_invalid_shm(
         assert result.exit_code == 1
         assert "Have you deployed the SHM?" in result.stdout
 
+    def test_mismatched_domain(
+        self,
+        mock_graphapi_get_credential,  # noqa: ARG002
+        mock_pulumi_config_no_key_from_remote,  # noqa: ARG002
+        mock_shm_config_from_remote,  # noqa: ARG002
+        mock_sre_config_from_remote,  # noqa: ARG002
+        mock_entra_user_list,  # noqa: ARG002
+        runner,
+        tmp_contexts,  # noqa: ARG002
+    ):
+        result = runner.invoke(
+            users_command_group, ["register", "-u", "harry.lime", "sandbox"]
+        )
+
+        assert result.exit_code == 0
+        assert (
+            "principal domain name must match the domain of the SRE to be registered"
+            in result.stdout
+        )
+
     def test_invalid_sre(
         self,
         mock_pulumi_config_from_remote,  # noqa: ARG002
diff --git a/tests/conftest.py b/tests/conftest.py
index 5a8ce42847..8734d39ba1 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -29,7 +29,10 @@
 )
 from data_safe_haven.exceptions import DataSafeHavenAzureError
 from data_safe_haven.external import AzureSdk, PulumiAccount
-from data_safe_haven.external.api.credentials import AzureSdkCredential
+from data_safe_haven.external.api.credentials import (
+    AzureSdkCredential,
+    GraphApiCredential,
+)
 from data_safe_haven.infrastructure import SREProjectManager
 from data_safe_haven.infrastructure.project_manager import ProjectManager
 from data_safe_haven.logging import init_logging
@@ -215,6 +218,19 @@ def mock_azuresdk_get_subscription_name(mocker):
     )
 
 
+@fixture
+def mock_graphapi_get_credential(mocker):
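+    # Return a dummy token so tests never trigger interactive Graph authentication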
+    class MockCredential(TokenCredential):
+        def get_token(*args, **kwargs):  # noqa: ARG002
+            return AccessToken("dummy-token", 0)
+
+    mocker.patch.object(
+        GraphApiCredential,
+        "get_credential",
+        return_value=MockCredential(),
+    )
+
+
 @fixture
 def mock_azuresdk_get_credential(mocker):
     class MockCredential(TokenCredential):
diff --git a/tests/external/api/test_credentials.py b/tests/external/api/test_credentials.py
index c0e631e912..e57bdb324b 100644
--- a/tests/external/api/test_credentials.py
+++ b/tests/external/api/test_credentials.py
@@ -5,7 +5,10 @@
 )
 
 from data_safe_haven.directories import config_dir
-from data_safe_haven.exceptions import DataSafeHavenAzureError
+from data_safe_haven.exceptions import (
+    DataSafeHavenAzureError,
+    DataSafeHavenCachedCredentialError,
+)
 from data_safe_haven.external.api.credentials import (
     AzureSdkCredential,
     DeferredCredential,
@@ -13,7 +16,7 @@
 )
 
 
-class TestDeferredCredential:
+class TestAzureSdkCredential:
     def test_confirm_credentials_interactive(
         self,
         mock_confirm_yes,  # noqa: ARG002
@@ -33,14 +36,17 @@ def test_confirm_credentials_interactive_fail(
         self,
         mock_confirm_no,  # noqa: ARG002
         mock_azureclicredential_get_token,  # noqa: ARG002
+        capsys,
     ):
         DeferredCredential.cache_ = set()
         credential = AzureSdkCredential(skip_confirmation=False)
         with pytest.raises(
-            DataSafeHavenAzureError,
-            match="Error getting account information from Azure CLI.",
+            DataSafeHavenCachedCredentialError,
+            match="Selected credentials are incorrect.",
         ):
             credential.get_credential()
+        out, _ = capsys.readouterr()
+        assert "Please authenticate with Azure: run 'az login'" in out
 
     def test_confirm_credentials_interactive_cache(
         self,
@@ -67,8 +73,6 @@ def test_decode_token_error(
         ):
             credential.decode_token(credential.token)
 
-
-class TestAzureSdkCredential:
     def test_get_credential(self, mock_azureclicredential_get_token):  # noqa: ARG002
         credential = AzureSdkCredential(skip_confirmation=True)
         assert isinstance(credential.get_credential(), AzureCliCredential)