diff --git a/.all-contributorsrc b/.all-contributorsrc index a36e18670c..576f5b41c8 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -580,7 +580,8 @@ "profile": "https://carlos.gavidia.me/", "contributions": [ "bug", - "ideas" + "ideas", + "doc" ] }, { diff --git a/.github/ISSUE_TEMPLATE/deployment_bug_report.md b/.github/ISSUE_TEMPLATE/deployment_bug_report.md index 6cf453cc13..fa569038c2 100644 --- a/.github/ISSUE_TEMPLATE/deployment_bug_report.md +++ b/.github/ISSUE_TEMPLATE/deployment_bug_report.md @@ -29,7 +29,6 @@ Before reporting a problem please check the following. Replace the empty checkbo List of packages diff --git a/.github/ISSUE_TEMPLATE/release_checklist.md b/.github/ISSUE_TEMPLATE/release_checklist.md index a25064faa1..575f5c9c53 100644 --- a/.github/ISSUE_TEMPLATE/release_checklist.md +++ b/.github/ISSUE_TEMPLATE/release_checklist.md @@ -14,9 +14,9 @@ Before reporting a problem please check the following. Replace the empty checkbo Refer to the [Deployment](https://data-safe-haven.readthedocs.io/en/latest/deployment) section of our documentation when completing these steps. -- [ ] Consult the `data-safe-haven/VERSIONING.md` guide and determine the version number of the new release. Record it in the title of this issue. +- [ ] Consult the `data-safe-haven/VERSIONING.md` guide and determine the version number of the new release. Record it in the title of this issue - [ ] Create a release branch called e.g. `release-v0.0.1` -- [ ] Draft a changelog for the release similar to our previous releases, see https://github.com/alan-turing-institute/data-safe-haven/releases +- [ ] Draft a changelog for the release similar to our [previous releases](https://github.com/alan-turing-institute/data-safe-haven/releases) ### For patch releases only @@ -34,26 +34,27 @@ Refer to the [Deployment](https://data-safe-haven.readthedocs.io/en/latest/deplo ### For major releases only - [ ] Confirm that a third party has carried out a full penetration test evaluating: - 1. external attack surface - 1. ability to exfiltrate data from the system - 1. ability to transfer data between SREs - 1. ability to escalate privileges on the SRD. + 1. external attack surface + 1. ability to exfiltrate data from the system + 1. ability to transfer data between SREs + 1. ability to escalate privileges on the SRD. ### Update documentation - [ ] Update supported versions in `SECURITY.md` - [ ] Update pen test results in `VERSIONING.md` -## :computer: Release information +### Making the release -- **Version number:** _ -- **SHM ID:** _ -- **T2 SRE ID:** _ -- **T3 SRE ID:** _ +- [ ] Merge release branch into `latest` +- [ ] Create a tag of the form `v0.0.1` pointing to the most recent commit on `latest` (the merge that you just made) +- [ ] Publish your draft GitHub release using this tag +- [ ] Ensure docs for the latest version are built and deployed on ReadTheDocs +- [ ] Push a build to PyPI +- [ ] Announce release on communications channels ## :deciduous_tree: Deployment problems - diff --git a/.github/scripts/update_azure_data_studio.py b/.github/scripts/update_azure_data_studio.py deleted file mode 100644 index 651e85fdfc..0000000000 --- a/.github/scripts/update_azure_data_studio.py +++ /dev/null @@ -1,19 +0,0 @@ -#! 
/usr/bin/env python3 -from lxml import html -import hashlib -import requests - -remote_page = requests.get("https://docs.microsoft.com/en-us/sql/azure-data-studio/download-azure-data-studio", allow_redirects=True) -root = html.fromstring(remote_page.content) -short_link = root.xpath("//a[contains(text(), '.deb')]/@href")[0] - -remote_content = requests.get(short_link, allow_redirects=True) -sha256 = hashlib.sha256(remote_content.content).hexdigest() -version = remote_content.url.split("-")[-1].replace(".deb", "") -remote = "/".join(remote_content.url.split("/")[:-1] + ["|DEBFILE|"]) - -with open("deployment/secure_research_desktop/packages/deb-azuredatastudio.version", "w") as f_out: - f_out.write(f"hash: {sha256}\n") - f_out.write(f"version: {version}\n") - f_out.write("debfile: azuredatastudio-linux-|VERSION|.deb\n") - f_out.write(f"remote: {remote}\n") diff --git a/.github/scripts/update_dbeaver_drivers.py b/.github/scripts/update_dbeaver_drivers.py deleted file mode 100644 index 696a501858..0000000000 --- a/.github/scripts/update_dbeaver_drivers.py +++ /dev/null @@ -1,72 +0,0 @@ -#! /usr/bin/env python3 -import json -from lxml import html -from natsort import natsorted -import requests - - -def get_latest_version(url, search_text): - """ - Get latest version number of a database driver from the Maven repository. - - Fetches the HTML page at the given URL, then converts it to an lxml tree. - Numeric strings are then extracted. - Note that mostly numeric strings for some drivers contain non-numeric text, - as different driver types exist for those drivers, even where the version number is the same. - The largest (latest) version number of the driver is then returned. - - Parameters - ---------- - url : str - The URL of the Maven repository containing the driver - search_text : str - Text to search for in the repository, to distinguish the driver from other files - - Returns - ------- - list - The latest available version number of the driver - """ - - remote_page = requests.get(url, allow_redirects=True) - root = html.fromstring(remote_page.content) - return natsorted([v for v in root.xpath("//a[contains(text(), '" + search_text + "')]/@href") if v != "../"])[-1].replace("/", "") - - -drivers = [ - { - 'name': "mssql_jdbc", - 'url': "https://repo1.maven.org/maven2/com/microsoft/sqlserver/mssql-jdbc/", - 'search_text': "jre8/" - }, - { - 'name': "pgjdbc", - 'url': "https://repo1.maven.org/maven2/org/postgresql/pgjdbc-versions/", - 'search_text': "/" - }, - { - 'name': "postgresql", - 'url': "https://repo1.maven.org/maven2/org/postgresql/postgresql/", - 'search_text': "/" - }, - { - 'name': "postgis_geometry", - 'url': "https://repo1.maven.org/maven2/net/postgis/postgis-geometry/", - 'search_text': "/" - }, - { - 'name': "postgis_jdbc", - 'url': "https://repo1.maven.org/maven2/net/postgis/postgis-jdbc/", - 'search_text': "/" - }, - { - 'name': "waffle_jna", - 'url': "https://repo1.maven.org/maven2/com/github/waffle/waffle-jna/", - 'search_text': "/" - } -] - -output = {driver['name']: get_latest_version(driver['url'], driver['search_text']) for driver in drivers} - -with open("deployment/secure_research_desktop/packages/dbeaver-driver-versions.json", "w") as f_out: - f_out.writelines(json.dumps(output, indent=4, sort_keys=True)) diff --git a/.github/scripts/update_rstudio.py b/.github/scripts/update_rstudio.py deleted file mode 100644 index ee36a35e66..0000000000 --- a/.github/scripts/update_rstudio.py +++ /dev/null @@ -1,21 +0,0 @@ -#! 
/usr/bin/env python3 -from lxml import html -import hashlib -import requests - -remote_page = requests.get("https://www.rstudio.com/products/rstudio/download/", allow_redirects=True) -root = html.fromstring(remote_page.content) -short_links = [link for link in root.xpath("//a[contains(text(), '.deb')]/@href") if "debian" not in link] - -for ubuntu_version in ["focal", "jammy"]: - short_link = [link for link in short_links if ubuntu_version in link][0] - remote_content = requests.get(short_link, allow_redirects=True) - sha256 = hashlib.sha256(remote_content.content).hexdigest() - version = "-".join(remote_content.url.split("/")[-1].split("-")[1:-1]) - remote = "/".join(remote_content.url.split("/")[:-1] + ["|DEBFILE|"]) - - with open(f"deployment/secure_research_desktop/packages/deb-rstudio-{ubuntu_version}.version", "w") as f_out: - f_out.write(f"hash: {sha256}\n") - f_out.write(f"version: {version}\n") - f_out.write("debfile: rstudio-|VERSION|-amd64.deb\n") - f_out.write(f"remote: {remote}\n") diff --git a/.github/security_checklist_template.md b/.github/security_checklist_template.md deleted file mode 100644 index b963331eef..0000000000 --- a/.github/security_checklist_template.md +++ /dev/null @@ -1,167 +0,0 @@ -# Security checklist -Running on SHM/SREs deployed using commit XXXXXXX - -## Summary -+ :white_check_mark: N tests passed -- :partly_sunny: N tests partially passed (see below for more details) -- :fast_forward: N tests skipped (see below for more details) -- :x: N tests failed (see below for more details) - -## Details -Some security checks were skipped since: -- No managed device was available -- No access to a physical space with its own dedicated network was possible - -### Multifactor Authentication and Password strength -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Check that the SRE standard user cannot access the apps - +
:camera: Verify before adding to group: Microsoft Remote Desktop: Login works but apps cannot be viewed - -
- +
:camera: Verify before adding to group: Guacamole: User is prompted to setup MFA - -
- -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Check that adding the **SRE standard user** to the SRE group on the domain controller does not give them access - +
:camera: Verify after adding to group: Microsoft Remote Desktop: Login works and apps can be viewed - -
- +
:camera: Verify after adding to group: Microsoft Remote Desktop: attempt to login to DSVM Main (Desktop) fails - -
- +
:camera: Verify before adding to group: Guacamole: User is prompted to setup MFA - -
- -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Check that the **SRE standard user** is able to successfully set up MFA - +
:camera: Verify: successfully set up MFA - -
- -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Check that the **SRE standard user** can authenticate with MFA - +
:camera: Verify: Guacamole: respond to the MFA prompt
- +
:camera: Verify: Microsoft Remote Desktop: attempt to log in to DSVM Main (Desktop) and respond to the MFA prompt
- -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Check that the **SRE standard user** can access the DSVM desktop - +
:camera: Verify: Microsoft Remote Desktop: connect to DSVM Main (Desktop) - -
- +
:camera: Verify: Guacamole: connect to Desktop: Ubuntu0 - -
- -### Isolated Network -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Connect to the SHM DC and NPS if connected to the SHM VPN -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Fail to connect to the SHM DC and NPS if not connected to the SHM VPN -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Fail to connect to the internet from within a DSVM on the SRE network. - +
:camera: Verify: Connection fails
- +
:camera: Verify: that you cannot access a website using curl - -
- +
:camera: Verify: that you cannot get the IP address for a website using nslookup - -
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Check that users cannot connect between two SREs within the same SHM, even if they have access to both SREs - +
:camera: Verify: SSH connection fails - -
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Network rules are set appropriately to block outgoing traffic - +
:camera: Verify: access rules - -
- -### User devices -#### Tier 2: -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Connection succeeds from a personal device with an allow-listed IP address -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: No managed device available to check connection - -#### Tier 3: -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: No managed device available to check user lacks root access -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Connection succeeds from a personal device with an allow-listed IP address -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: No managed device available to check connection with an allow-listed IP address - -#### Tiers 2+: -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Network rules permit access only from allow-listed IP addresses - +
:camera: Verify: access rules - -
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: All non-deployment NSGs have rules denying inbound connections from outside the Virtual Network - -### Physical security -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: No secure physical space available so connection from outside was not tested -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: No secure physical space available so connection from inside was not tested -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Check the network IP ranges corresponding to the research spaces and compare against the IPs accepted by the firewall. -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: No secure physical space available so confirmation of physical measures was not tested - -### Remote connections - -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Unable to connect as a user to the remote desktop server via SSH - +
:camera: Verify: SSH connection by FQDN fails - -
- +
:camera: Verify: SSH connection by public IP address fails - -
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: The remote desktop server is the only SRE resource with a public IP address - -### Copy-and-paste -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Unable to paste local text into a DSVM -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Unable to copy text from a DSVM -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Copy between VMs in an SRE succeeds - -### Data ingress -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **System administrator:** secure upload token successfully created with write-only permissions -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **System administrator:** token was sent using a secure, out-of-band communication channel (e.g. secure email) -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **Data Provider:** uploading a file from an allow-listed IP address succeeds -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **Data Provider:** downloading a file from an allow-listed IP address fails -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **Data Provider:** uploading a file from an non-allowed IP address fails -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **Data Provider:** connection during lifetime of short-duration token succeeds -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **Data Provider:** connection after lifetime of short-duration token fails -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **Data Provider:** uploading different file types succeeds - -### Storage volumes and egress -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **SRE standard user** can read and write to the `/output` volume -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **SRE standard user** can only read from the `/data` volume -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **SRE standard user** can read and write to their directory in `/home` -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **SRE standard user** can read and write to the `/shared` volume -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **System administrator:** can see the files ready for egress -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **System administrator:** can download egress-ready files - -### Software Ingress -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **SRE standard user** expected software tools are installed - +
:camera: Verify: DBeaver, RStudio, PyCharm and Visual Studio Code available
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **System administrator:** secure upload token successfully created with write-only permissions -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **Data Provider:** uploading is possible only during the token lifetime -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **System administrator:** uploaded files are readable and can be installed on the DSVM -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: **SRE standard user** uploaded files are readable but cannot be installed on the DSVM - -### Package mirrors - -#### Tier 2: -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Can install any packages - +
:camera: Verify: botocore can be installed - -
- -#### Tier 3: -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Can install only allow-listed packages - +
:camera: Verify: aero-calc can be installed; botocore cannot be installed - -
- -### Azure firewalls -+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Admin has limited access to the internet - +
:camera: Verify: SHM DC cannot connect to google
-+ :white_check_mark:/:partly_sunny:/:fast_forward:/:x: Admin can download Windows updates - +
:camera: Verify: Windows updates can be downloaded
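The firewall and mirror checks above are verified manually from screenshots. As a minimal sketch (not part of the original checklist), the DNS-based checks — for example "cannot get the IP address for a website using nslookup" and "SHM DC cannot connect to google" — could be scripted along the following lines; the hostnames are illustrative assumptions only:

```python
# Illustrative sketch only: report whether DNS resolution succeeds for a set of
# hostnames, mirroring the manual nslookup / firewall checks in the checklist.
# The hostnames below are assumptions chosen for illustration.
import socket


def can_resolve(hostname: str) -> bool:
    """Return True if the hostname resolves to an IP address, False otherwise."""
    try:
        socket.gethostbyname(hostname)
    except socket.gaierror:
        return False
    return True


for hostname in ("www.google.com", "update.microsoft.com"):
    status = "resolvable" if can_resolve(hostname) else "blocked"
    print(f"{hostname}: {status}")
```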
diff --git a/.github/workflows/lint_code.yaml b/.github/workflows/lint_code.yaml index 7786fc4b62..4d0caed16c 100644 --- a/.github/workflows/lint_code.yaml +++ b/.github/workflows/lint_code.yaml @@ -108,7 +108,7 @@ jobs: done rm expanded.tmp - name: Lint YAML - uses: karancode/yamllint-github-action@v2.1.1 + uses: karancode/yamllint-github-action@v3.0.0 with: yamllint_strict: true yamllint_comment: false diff --git a/.hatch/requirements-docs.txt b/.hatch/requirements-docs.txt index 95fcbdfd41..01ef044438 100644 --- a/.hatch/requirements-docs.txt +++ b/.hatch/requirements-docs.txt @@ -1,17 +1,125 @@ # # This file is autogenerated by hatch-pip-compile with Python 3.12 # +# - appdirs==1.4.4 +# - azure-core==1.32.0 +# - azure-identity==1.19.0 +# - azure-keyvault-certificates==4.9.0 +# - azure-keyvault-keys==4.10.0 +# - azure-keyvault-secrets==4.9.0 +# - azure-mgmt-compute==33.0.0 +# - azure-mgmt-containerinstance==10.1.0 +# - azure-mgmt-dns==8.2.0 +# - azure-mgmt-keyvault==10.3.1 +# - azure-mgmt-msi==7.0.0 +# - azure-mgmt-rdbms==10.1.0 +# - azure-mgmt-resource==23.2.0 +# - azure-mgmt-storage==21.2.1 +# - azure-storage-blob==12.24.0 +# - azure-storage-file-datalake==12.18.0 +# - azure-storage-file-share==12.20.0 +# - chevron==0.14.0 +# - cryptography==44.0.0 +# - fqdn==1.5.1 +# - psycopg[binary]==3.1.19 +# - pulumi-azure-native==2.76.0 +# - pulumi-azuread==6.0.1 +# - pulumi-random==4.16.7 +# - pulumi==3.142.0 +# - pydantic==2.10.3 +# - pyjwt[crypto]==2.10.1 +# - pytz==2024.2 +# - pyyaml==6.0.2 +# - rich==13.9.4 +# - simple-acme-dns==3.2.0 +# - typer==0.15.1 +# - websocket-client==1.8.0 # - emoji==2.14.0 # - myst-parser==4.0.0 # - pydata-sphinx-theme==0.16.0 # - sphinx-togglebutton==0.3.2 # - sphinx==8.1.3 +# - sphinxcontrib-typer==0.5.1 # accessible-pygments==0.0.5 # via pydata-sphinx-theme +acme==2.10.0 + # via simple-acme-dns alabaster==1.0.0 # via sphinx +annotated-types==0.7.0 + # via pydantic +appdirs==1.4.4 + # via hatch.envs.docs +arpeggio==2.0.2 + # via parver +attrs==24.2.0 + # via parver +azure-common==1.1.28 + # via + # azure-mgmt-compute + # azure-mgmt-containerinstance + # azure-mgmt-dns + # azure-mgmt-keyvault + # azure-mgmt-msi + # azure-mgmt-rdbms + # azure-mgmt-resource + # azure-mgmt-storage +azure-core==1.32.0 + # via + # hatch.envs.docs + # azure-identity + # azure-keyvault-certificates + # azure-keyvault-keys + # azure-keyvault-secrets + # azure-mgmt-core + # azure-storage-blob + # azure-storage-file-datalake + # azure-storage-file-share + # msrest +azure-identity==1.19.0 + # via hatch.envs.docs +azure-keyvault-certificates==4.9.0 + # via hatch.envs.docs +azure-keyvault-keys==4.10.0 + # via hatch.envs.docs +azure-keyvault-secrets==4.9.0 + # via hatch.envs.docs +azure-mgmt-compute==33.0.0 + # via hatch.envs.docs +azure-mgmt-containerinstance==10.1.0 + # via hatch.envs.docs +azure-mgmt-core==1.5.0 + # via + # azure-mgmt-compute + # azure-mgmt-containerinstance + # azure-mgmt-dns + # azure-mgmt-keyvault + # azure-mgmt-msi + # azure-mgmt-rdbms + # azure-mgmt-resource + # azure-mgmt-storage +azure-mgmt-dns==8.2.0 + # via hatch.envs.docs +azure-mgmt-keyvault==10.3.1 + # via hatch.envs.docs +azure-mgmt-msi==7.0.0 + # via hatch.envs.docs +azure-mgmt-rdbms==10.1.0 + # via hatch.envs.docs +azure-mgmt-resource==23.2.0 + # via hatch.envs.docs +azure-mgmt-storage==21.2.1 + # via hatch.envs.docs +azure-storage-blob==12.24.0 + # via + # hatch.envs.docs + # azure-storage-file-datalake +azure-storage-file-datalake==12.18.0 + # via hatch.envs.docs +azure-storage-file-share==12.20.0 + # via 
hatch.envs.docs babel==2.16.0 # via # pydata-sphinx-theme @@ -19,9 +127,37 @@ babel==2.16.0 beautifulsoup4==4.12.3 # via pydata-sphinx-theme certifi==2024.8.30 - # via requests + # via + # msrest + # requests +cffi==1.17.1 + # via cryptography charset-normalizer==3.4.0 # via requests +chevron==0.14.0 + # via hatch.envs.docs +click==8.1.7 + # via + # typer + # typer-slim +cryptography==44.0.0 + # via + # hatch.envs.docs + # acme + # azure-identity + # azure-keyvault-keys + # azure-storage-blob + # azure-storage-file-share + # josepy + # msal + # pyjwt + # pyopenssl +debugpy==1.8.9 + # via pulumi +dill==0.3.9 + # via pulumi +dnspython==2.7.0 + # via simple-acme-dns docutils==0.21.2 # via # myst-parser @@ -30,39 +166,151 @@ docutils==0.21.2 # sphinx-togglebutton emoji==2.14.0 # via hatch.envs.docs +fqdn==1.5.1 + # via hatch.envs.docs +grpcio==1.66.2 + # via pulumi idna==3.10 # via requests imagesize==1.4.1 # via sphinx +isodate==0.7.2 + # via + # azure-keyvault-certificates + # azure-keyvault-keys + # azure-keyvault-secrets + # azure-mgmt-compute + # azure-mgmt-containerinstance + # azure-mgmt-dns + # azure-mgmt-keyvault + # azure-mgmt-resource + # azure-mgmt-storage + # azure-storage-blob + # azure-storage-file-datalake + # azure-storage-file-share + # msrest jinja2==3.1.4 # via # myst-parser # sphinx +josepy==1.14.0 + # via acme markdown-it-py==3.0.0 # via # mdit-py-plugins # myst-parser + # rich markupsafe==3.0.2 # via jinja2 mdit-py-plugins==0.4.2 # via myst-parser mdurl==0.1.2 # via markdown-it-py +msal==1.31.1 + # via + # azure-identity + # msal-extensions +msal-extensions==1.2.0 + # via azure-identity +msrest==0.7.1 + # via + # azure-mgmt-msi + # azure-mgmt-rdbms myst-parser==4.0.0 # via hatch.envs.docs +oauthlib==3.2.2 + # via requests-oauthlib packaging==24.2 # via sphinx +parver==0.5 + # via + # pulumi-azure-native + # pulumi-azuread + # pulumi-random +portalocker==2.10.1 + # via msal-extensions +protobuf==4.25.5 + # via pulumi +psycopg==3.1.19 + # via hatch.envs.docs +psycopg-binary==3.1.19 + # via psycopg +pulumi==3.142.0 + # via + # hatch.envs.docs + # pulumi-azure-native + # pulumi-azuread + # pulumi-random +pulumi-azure-native==2.76.0 + # via hatch.envs.docs +pulumi-azuread==6.0.1 + # via hatch.envs.docs +pulumi-random==4.16.7 + # via hatch.envs.docs +pycparser==2.22 + # via cffi +pydantic==2.10.3 + # via hatch.envs.docs +pydantic-core==2.27.1 + # via pydantic pydata-sphinx-theme==0.16.0 # via hatch.envs.docs pygments==2.18.0 # via # accessible-pygments # pydata-sphinx-theme + # rich # sphinx +pyjwt==2.10.1 + # via + # hatch.envs.docs + # msal +pyopenssl==24.3.0 + # via + # acme + # josepy +pyrfc3339==2.0.1 + # via acme +pytz==2024.2 + # via + # hatch.envs.docs + # acme pyyaml==6.0.2 - # via myst-parser + # via + # hatch.envs.docs + # myst-parser + # pulumi requests==2.32.3 - # via sphinx + # via + # acme + # azure-core + # msal + # msrest + # requests-oauthlib + # sphinx +requests-oauthlib==2.0.0 + # via msrest +rich==13.9.4 + # via + # hatch.envs.docs + # typer + # typer-slim +semver==2.13.0 + # via + # pulumi + # pulumi-azure-native + # pulumi-azuread + # pulumi-random +shellingham==1.5.4 + # via + # typer + # typer-slim +simple-acme-dns==3.2.0 + # via hatch.envs.docs +six==1.17.0 + # via + # azure-core + # pulumi snowballstemmer==2.2.0 # via sphinx soupsieve==2.6 @@ -73,6 +321,7 @@ sphinx==8.1.3 # myst-parser # pydata-sphinx-theme # sphinx-togglebutton + # sphinxcontrib-typer sphinx-togglebutton==0.3.2 # via hatch.envs.docs sphinxcontrib-applehelp==2.0.0 @@ -87,11 +336,39 
@@ sphinxcontrib-qthelp==2.0.0 # via sphinx sphinxcontrib-serializinghtml==2.0.0 # via sphinx +sphinxcontrib-typer==0.5.1 + # via hatch.envs.docs +typer==0.15.1 + # via hatch.envs.docs +typer-slim==0.15.1 + # via sphinxcontrib-typer typing-extensions==4.12.2 - # via pydata-sphinx-theme + # via + # azure-core + # azure-identity + # azure-keyvault-certificates + # azure-keyvault-keys + # azure-keyvault-secrets + # azure-mgmt-compute + # azure-mgmt-dns + # azure-mgmt-keyvault + # azure-mgmt-resource + # azure-storage-blob + # azure-storage-file-datalake + # azure-storage-file-share + # psycopg + # pydantic + # pydantic-core + # pydata-sphinx-theme + # typer + # typer-slim urllib3==2.2.3 # via requests -wheel==0.45.0 +validators==0.34.0 + # via simple-acme-dns +websocket-client==1.8.0 + # via hatch.envs.docs +wheel==0.45.1 # via sphinx-togglebutton # The following packages are considered to be unsafe in a requirements file: diff --git a/.hatch/requirements-lint.txt b/.hatch/requirements-lint.txt index 2ffa3e2ba4..bdef81d7a0 100644 --- a/.hatch/requirements-lint.txt +++ b/.hatch/requirements-lint.txt @@ -1,13 +1,13 @@ # # This file is autogenerated by hatch-pip-compile with Python 3.12 # -# - ansible-dev-tools==24.10.2 -# - ansible==10.6.0 +# - ansible-dev-tools==24.11.0 +# - ansible==11.1.0 # - black==24.10.0 # - mypy==1.13.0 -# - pandas-stubs==2.2.3.241009 -# - pydantic==2.9.2 -# - ruff==0.7.3 +# - pandas-stubs==2.2.3.241126 +# - pydantic==2.10.3 +# - ruff==0.8.2 # - types-appdirs==1.4.3.5 # - types-chevron==0.14.2.20240310 # - types-pytz==2024.2.0.20241003 @@ -17,36 +17,36 @@ annotated-types==0.7.0 # via pydantic -ansible==10.6.0 +ansible==11.1.0 # via hatch.envs.lint ansible-builder==3.1.0 # via # ansible-dev-environment # ansible-dev-tools # ansible-navigator -ansible-compat==24.9.1 +ansible-compat==24.10.0 # via # ansible-lint # molecule # pytest-ansible -ansible-core==2.17.6 +ansible-core==2.18.1 # via # ansible # ansible-compat # ansible-lint # molecule # pytest-ansible -ansible-creator==24.10.1 +ansible-creator==24.12.0 # via ansible-dev-tools ansible-dev-environment==24.9.0 # via ansible-dev-tools -ansible-dev-tools==24.10.2 +ansible-dev-tools==24.11.0 # via hatch.envs.lint -ansible-lint==24.9.2 +ansible-lint==24.10.0 # via # ansible-dev-tools # ansible-navigator -ansible-navigator==24.9.0 +ansible-navigator==24.10.0 # via ansible-dev-tools ansible-runner==2.4.0 # via ansible-navigator @@ -81,7 +81,7 @@ click-help-colors==0.9.4 # via molecule colorama==0.4.6 # via tox -cryptography==43.0.3 +cryptography==44.0.0 # via ansible-core distlib==0.3.9 # via @@ -125,7 +125,7 @@ markupsafe==3.0.2 # via jinja2 mdurl==0.1.2 # via markdown-it-py -molecule==24.9.0 +molecule==24.12.0 # via ansible-dev-tools mypy==1.13.0 # via hatch.envs.lint @@ -133,7 +133,7 @@ mypy-extensions==1.0.0 # via # black # mypy -numpy==2.1.3 +numpy==2.2.0 # via pandas-stubs onigurumacffi==1.3.0 # via ansible-navigator @@ -151,7 +151,7 @@ packaging==24.2 # pytest # pytest-ansible # tox -pandas-stubs==2.2.3.241009 +pandas-stubs==2.2.3.241126 # via hatch.envs.lint parsley==1.3 # via bindep @@ -178,15 +178,15 @@ ptyprocess==0.7.0 # via pexpect pycparser==2.22 # via cffi -pydantic==2.9.2 +pydantic==2.10.3 # via hatch.envs.lint -pydantic-core==2.23.4 +pydantic-core==2.27.1 # via pydantic pygments==2.18.0 # via rich pyproject-api==1.8.0 # via tox -pytest==8.3.3 +pytest==8.3.4 # via # pytest-ansible # pytest-xdist @@ -197,7 +197,7 @@ pytest-ansible==24.9.0 # tox-ansible pytest-xdist==3.6.1 # via tox-ansible -python-daemon==3.1.0 
+python-daemon==3.1.2 # via ansible-runner python-gnupg==0.5.3 # via ansible-sign @@ -225,7 +225,7 @@ rich==13.9.4 # ansible-lint # enrich # molecule -rpds-py==0.21.0 +rpds-py==0.22.3 # via # jsonschema # referencing @@ -233,7 +233,7 @@ ruamel-yaml==0.18.6 # via ansible-lint ruamel-yaml-clib==0.2.12 # via ruamel-yaml -ruff==0.7.3 +ruff==0.8.2 # via hatch.envs.lint subprocess-tee==0.4.2 # via @@ -242,7 +242,7 @@ subprocess-tee==0.4.2 # ansible-lint tox==4.23.2 # via tox-ansible -tox-ansible==24.9.0 +tox-ansible==24.10.0 # via ansible-dev-tools types-appdirs==1.4.3.5 # via hatch.envs.lint @@ -265,7 +265,7 @@ tzdata==2024.2 # via ansible-navigator urllib3==2.2.3 # via types-requests -virtualenv==20.27.1 +virtualenv==20.28.0 # via tox wcmatch==10.0 # via diff --git a/.hatch/requirements-test.txt b/.hatch/requirements-test.txt index 3b6dd5421f..177bcd5953 100644 --- a/.hatch/requirements-test.txt +++ b/.hatch/requirements-test.txt @@ -1,7 +1,7 @@ # # This file is autogenerated by hatch-pip-compile with Python 3.12 # -# [constraints] .hatch/requirements.txt (SHA256: ac6baaee77c4015ca7749690da5f658092b9d5572c7e4083c08b8c4bf75c5c6c) +# [constraints] .hatch/requirements.txt (SHA256: 5b14c21af4fbc3de1ff8ee0f3a57b092aa1a5c814ad56dccb0b50fd2d010eeb2) # # - appdirs==1.4.4 # - azure-core==1.32.0 @@ -17,29 +17,29 @@ # - azure-mgmt-rdbms==10.1.0 # - azure-mgmt-resource==23.2.0 # - azure-mgmt-storage==21.2.1 -# - azure-storage-blob==12.23.1 -# - azure-storage-file-datalake==12.17.0 -# - azure-storage-file-share==12.19.0 +# - azure-storage-blob==12.24.0 +# - azure-storage-file-datalake==12.18.0 +# - azure-storage-file-share==12.20.0 # - chevron==0.14.0 -# - cryptography==43.0.3 +# - cryptography==44.0.0 # - fqdn==1.5.1 # - psycopg[binary]==3.1.19 -# - pulumi-azure-native==2.71.0 +# - pulumi-azure-native==2.76.0 # - pulumi-azuread==6.0.1 # - pulumi-random==4.16.7 -# - pulumi==3.138.0 -# - pydantic==2.9.2 -# - pyjwt[crypto]==2.9.0 +# - pulumi==3.142.0 +# - pydantic==2.10.3 +# - pyjwt[crypto]==2.10.1 # - pytz==2024.2 # - pyyaml==6.0.2 # - rich==13.9.4 # - simple-acme-dns==3.2.0 -# - typer==0.13.0 +# - typer==0.15.1 # - websocket-client==1.8.0 -# - coverage==7.6.4 +# - coverage==7.6.9 # - freezegun==1.5.1 # - pytest-mock==3.14.0 -# - pytest==8.3.3 +# - pytest==8.3.4 # - requests-mock==1.12.1 # @@ -146,16 +146,16 @@ azure-mgmt-storage==21.2.1 # via # -c .hatch/requirements.txt # hatch.envs.test -azure-storage-blob==12.23.1 +azure-storage-blob==12.24.0 # via # -c .hatch/requirements.txt # hatch.envs.test # azure-storage-file-datalake -azure-storage-file-datalake==12.17.0 +azure-storage-file-datalake==12.18.0 # via # -c .hatch/requirements.txt # hatch.envs.test -azure-storage-file-share==12.19.0 +azure-storage-file-share==12.20.0 # via # -c .hatch/requirements.txt # hatch.envs.test @@ -180,9 +180,9 @@ click==8.1.7 # via # -c .hatch/requirements.txt # typer -coverage==7.6.4 +coverage==7.6.9 # via hatch.envs.test -cryptography==43.0.3 +cryptography==44.0.0 # via # -c .hatch/requirements.txt # hatch.envs.test @@ -195,7 +195,7 @@ cryptography==43.0.3 # msal # pyjwt # pyopenssl -debugpy==1.8.8 +debugpy==1.8.9 # via # -c .hatch/requirements.txt # pulumi @@ -251,7 +251,7 @@ mdurl==0.1.2 # via # -c .hatch/requirements.txt # markdown-it-py -msal==1.31.0 +msal==1.31.1 # via # -c .hatch/requirements.txt # azure-identity @@ -295,14 +295,14 @@ psycopg-binary==3.1.19 # via # -c .hatch/requirements.txt # psycopg -pulumi==3.138.0 +pulumi==3.142.0 # via # -c .hatch/requirements.txt # hatch.envs.test # pulumi-azure-native # 
pulumi-azuread # pulumi-random -pulumi-azure-native==2.71.0 +pulumi-azure-native==2.76.0 # via # -c .hatch/requirements.txt # hatch.envs.test @@ -318,11 +318,11 @@ pycparser==2.22 # via # -c .hatch/requirements.txt # cffi -pydantic==2.9.2 +pydantic==2.10.3 # via # -c .hatch/requirements.txt # hatch.envs.test -pydantic-core==2.23.4 +pydantic-core==2.27.1 # via # -c .hatch/requirements.txt # pydantic @@ -330,12 +330,12 @@ pygments==2.18.0 # via # -c .hatch/requirements.txt # rich -pyjwt==2.9.0 +pyjwt==2.10.1 # via # -c .hatch/requirements.txt # hatch.envs.test # msal -pyopenssl==24.2.1 +pyopenssl==24.3.0 # via # -c .hatch/requirements.txt # acme @@ -344,7 +344,7 @@ pyrfc3339==2.0.1 # via # -c .hatch/requirements.txt # acme -pytest==8.3.3 +pytest==8.3.4 # via # hatch.envs.test # pytest-mock @@ -397,13 +397,13 @@ simple-acme-dns==3.2.0 # via # -c .hatch/requirements.txt # hatch.envs.test -six==1.16.0 +six==1.17.0 # via # -c .hatch/requirements.txt # azure-core # pulumi # python-dateutil -typer==0.13.0 +typer==0.15.1 # via # -c .hatch/requirements.txt # hatch.envs.test diff --git a/.hatch/requirements.txt b/.hatch/requirements.txt index 5451c40b45..9506db78d5 100644 --- a/.hatch/requirements.txt +++ b/.hatch/requirements.txt @@ -15,24 +15,24 @@ # - azure-mgmt-rdbms==10.1.0 # - azure-mgmt-resource==23.2.0 # - azure-mgmt-storage==21.2.1 -# - azure-storage-blob==12.23.1 -# - azure-storage-file-datalake==12.17.0 -# - azure-storage-file-share==12.19.0 +# - azure-storage-blob==12.24.0 +# - azure-storage-file-datalake==12.18.0 +# - azure-storage-file-share==12.20.0 # - chevron==0.14.0 -# - cryptography==43.0.3 +# - cryptography==44.0.0 # - fqdn==1.5.1 # - psycopg[binary]==3.1.19 -# - pulumi-azure-native==2.71.0 +# - pulumi-azure-native==2.76.0 # - pulumi-azuread==6.0.1 # - pulumi-random==4.16.7 -# - pulumi==3.138.0 -# - pydantic==2.9.2 -# - pyjwt[crypto]==2.9.0 +# - pulumi==3.142.0 +# - pydantic==2.10.3 +# - pyjwt[crypto]==2.10.1 # - pytz==2024.2 # - pyyaml==6.0.2 # - rich==13.9.4 # - simple-acme-dns==3.2.0 -# - typer==0.13.0 +# - typer==0.15.1 # - websocket-client==1.8.0 # @@ -102,13 +102,13 @@ azure-mgmt-resource==23.2.0 # via hatch.envs.default azure-mgmt-storage==21.2.1 # via hatch.envs.default -azure-storage-blob==12.23.1 +azure-storage-blob==12.24.0 # via # hatch.envs.default # azure-storage-file-datalake -azure-storage-file-datalake==12.17.0 +azure-storage-file-datalake==12.18.0 # via hatch.envs.default -azure-storage-file-share==12.19.0 +azure-storage-file-share==12.20.0 # via hatch.envs.default certifi==2024.8.30 # via @@ -122,7 +122,7 @@ chevron==0.14.0 # via hatch.envs.default click==8.1.7 # via typer -cryptography==43.0.3 +cryptography==44.0.0 # via # hatch.envs.default # acme @@ -134,7 +134,7 @@ cryptography==43.0.3 # msal # pyjwt # pyopenssl -debugpy==1.8.8 +debugpy==1.8.9 # via pulumi dill==0.3.9 # via pulumi @@ -167,7 +167,7 @@ markdown-it-py==3.0.0 # via rich mdurl==0.1.2 # via markdown-it-py -msal==1.31.0 +msal==1.31.1 # via # azure-identity # msal-extensions @@ -192,13 +192,13 @@ psycopg==3.1.19 # via hatch.envs.default psycopg-binary==3.1.19 # via psycopg -pulumi==3.138.0 +pulumi==3.142.0 # via # hatch.envs.default # pulumi-azure-native # pulumi-azuread # pulumi-random -pulumi-azure-native==2.71.0 +pulumi-azure-native==2.76.0 # via hatch.envs.default pulumi-azuread==6.0.1 # via hatch.envs.default @@ -206,17 +206,17 @@ pulumi-random==4.16.7 # via hatch.envs.default pycparser==2.22 # via cffi -pydantic==2.9.2 +pydantic==2.10.3 # via hatch.envs.default -pydantic-core==2.23.4 
+pydantic-core==2.27.1 # via pydantic pygments==2.18.0 # via rich -pyjwt==2.9.0 +pyjwt==2.10.1 # via # hatch.envs.default # msal -pyopenssl==24.2.1 +pyopenssl==24.3.0 # via # acme # josepy @@ -253,11 +253,11 @@ shellingham==1.5.4 # via typer simple-acme-dns==3.2.0 # via hatch.envs.default -six==1.16.0 +six==1.17.0 # via # azure-core # pulumi -typer==0.13.0 +typer==0.15.1 # via hatch.envs.default typing-extensions==4.12.2 # via diff --git a/.mdlstyle.rb b/.mdlstyle.rb index 7ca3c2af8b..80b6e14d8c 100644 --- a/.mdlstyle.rb +++ b/.mdlstyle.rb @@ -6,7 +6,7 @@ exclude_rule 'MD013' exclude_rule 'MD024' rule 'MD026', :punctuation => ".,;" -rule 'MD029', :style => :ordered +rule 'MD029', :style => :one exclude_rule 'MD033' exclude_rule 'MD034' exclude_rule 'MD041' # this conflicts with MyST target anchors diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 145d005f83..56f8f2b24f 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -16,6 +16,8 @@ formats: python: install: - requirements: .hatch/requirements-docs.txt + - method: pip + path: . sphinx: configuration: docs/source/conf.py diff --git a/README.md b/README.md index 79659201f0..95bdbac05e 100644 --- a/README.md +++ b/README.md @@ -50,7 +50,7 @@ See our [Code of Conduct](CODE_OF_CONDUCT.md) and our [Contributor Guide](CONTRI Benjamin Walden
Benjamin Walden

📖 🤔 🐛 📆 📓 Brett Todd
Brett Todd

💻 🤔 Callum Mole
Callum Mole

๐Ÿ› ๐Ÿ’ป - Carlos Gavidia-Calderon
Carlos Gavidia-Calderon

๐Ÿ› ๐Ÿค” + Carlos Gavidia-Calderon
Carlos Gavidia-Calderon

๐Ÿ› ๐Ÿค” ๐Ÿ“– Catalina Vallejos
Catalina Vallejos

๐Ÿ–‹ diff --git a/SECURITY.md b/SECURITY.md index db056c976b..c81368a94e 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -7,8 +7,8 @@ All organisations using an earlier version in production should update to the la | Version | Supported | | --------------------------------------------------------------------------------------- | ------------------ | -| [5.0.1](https://github.com/alan-turing-institute/data-safe-haven/releases/tag/v5.0.1) | :white_check_mark: | -| < 5.0.1 | :x: | +| [5.1.0](https://github.com/alan-turing-institute/data-safe-haven/releases/tag/v5.1.0) | :white_check_mark: | +| < 5.1.0 | :x: | ## Reporting a Vulnerability diff --git a/data_safe_haven/commands/sre.py b/data_safe_haven/commands/sre.py index 8c3e0b5cdc..14330c2cff 100644 --- a/data_safe_haven/commands/sre.py +++ b/data_safe_haven/commands/sre.py @@ -96,6 +96,7 @@ def deploy( ) # Set Entra options application = graph_api.get_application_by_name(context.entra_application_name) + if not application: msg = f"No Entra application '{context.entra_application_name}' was found. Please redeploy your SHM." raise DataSafeHavenConfigError(msg) @@ -162,6 +163,13 @@ def deploy( timezone=sre_config.sre.timezone, ) manager.run() + + console.print( + f"Secure Research Environment '[green]{name}[/]' has been successfully deployed.", + f"The SRE can be accessed at [green]https://{stack.output('sre_fqdn')}[/]", + sep="\n", + ) + except DataSafeHavenError as exc: logger.critical( f"Could not deploy Secure Research Environment '[green]{name}[/]'." diff --git a/data_safe_haven/commands/users.py b/data_safe_haven/commands/users.py index fe413fa781..8c8b232ceb 100644 --- a/data_safe_haven/commands/users.py +++ b/data_safe_haven/commands/users.py @@ -5,6 +5,7 @@ import typer +from data_safe_haven import console from data_safe_haven.administration.users import UserHandler from data_safe_haven.config import ContextManager, DSHPulumiConfig, SHMConfig, SREConfig from data_safe_haven.exceptions import DataSafeHavenError @@ -120,9 +121,9 @@ def register( # Load SHMConfig try: shm_config = SHMConfig.from_remote(context) - except DataSafeHavenError: + except DataSafeHavenError as exc: logger.error("Have you deployed the SHM?") - raise + raise typer.Exit(1) from exc # Load Pulumi config pulumi_config = DSHPulumiConfig.from_remote(context) @@ -132,7 +133,7 @@ def register( if sre_config.name not in pulumi_config.project_names: msg = f"Could not load Pulumi settings for '{sre_config.name}'. Have you deployed the SRE?" logger.error(msg) - raise DataSafeHavenError(msg) + raise typer.Exit(1) # Load GraphAPI graph_api = GraphApi.from_scopes( @@ -146,16 +147,29 @@ def register( # List users users = UserHandler(context, graph_api) - available_usernames = users.get_usernames_entra_id() + available_users = users.entra_users.list() + user_dict = { + user.preferred_username.split("@")[0]: user.preferred_username.split("@")[1] + for user in available_users + } usernames_to_register = [] for username in usernames: - if username in available_usernames: - usernames_to_register.append(username) + if user_domain := user_dict.get(username): + if shm_config.shm.fqdn not in user_domain: + console.print( + f"User [green]'{username}[/green]'s principal domain name is [blue]'{user_domain}'[/blue].\n" + f"SRE [yellow]'{sre}'[/yellow] belongs to SHM domain [blue]'{shm_config.shm.fqdn}'[/blue]." + ) + logger.error( + "The user's principal domain name must match the domain of the SRE to be registered." 
+ ) + else: + usernames_to_register.append(username) else: logger.error( f"Username '{username}' does not belong to this Data Safe Haven deployment." - " Please use 'dsh users add' to create it." ) + console.print("Please use 'dsh users add' to create this user.") users.register(sre_config.name, usernames_to_register) except DataSafeHavenError as exc: logger.critical(f"Could not register Data Safe Haven users with SRE '{sre}'.") @@ -259,8 +273,8 @@ def unregister( else: logger.error( f"Username '{username}' does not belong to this Data Safe Haven deployment." - " Please use 'dsh users add' to create it." ) + console.print("Please use 'dsh users add' to create it.") for group_name in ( f"{sre_config.name} Users", f"{sre_config.name} Privileged Users", diff --git a/data_safe_haven/config/config_sections.py b/data_safe_haven/config/config_sections.py index 62bfec0833..f09da25648 100644 --- a/data_safe_haven/config/config_sections.py +++ b/data_safe_haven/config/config_sections.py @@ -57,7 +57,7 @@ class ConfigSectionSRE(BaseModel, validate_assignment=True): admin_email_address: EmailAddress admin_ip_addresses: list[IpAddress] = [] databases: UniqueList[DatabaseSystem] = [] - data_provider_ip_addresses: list[IpAddress] | AzureServiceTag = [] + data_provider_ip_addresses: list[IpAddress] = [] remote_desktop: ConfigSubsectionRemoteDesktopOpts research_user_ip_addresses: list[IpAddress] | AzureServiceTag = [] storage_quota_gb: ConfigSubsectionStorageQuotaGB diff --git a/data_safe_haven/console/__init__.py b/data_safe_haven/console/__init__.py index 133a48fc12..f30bda2882 100644 --- a/data_safe_haven/console/__init__.py +++ b/data_safe_haven/console/__init__.py @@ -1,5 +1,5 @@ from .format import tabulate -from .pretty import pretty_print as print # noqa: A001 +from .pretty import pretty_print as print # noqa: A004 from .prompts import confirm __all__ = [ diff --git a/data_safe_haven/exceptions/__init__.py b/data_safe_haven/exceptions/__init__.py index b22d70e693..a858cfaf6d 100644 --- a/data_safe_haven/exceptions/__init__.py +++ b/data_safe_haven/exceptions/__init__.py @@ -28,6 +28,16 @@ class DataSafeHavenAzureError(DataSafeHavenError): pass +class DataSafeHavenCachedCredentialError(DataSafeHavenError): + """ + Exception class for handling errors related to cached credentials. + + Raise this error when a cached credential is not the credential a user wants to use. + """ + + pass + + class DataSafeHavenAzureStorageError(DataSafeHavenAzureError): """ Exception class for handling errors when interacting with Azure Storage. 
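The new `DataSafeHavenCachedCredentialError` lets callers distinguish "a cached credential exists but the user rejected it" from other Azure failures; the credential changes further down in this diff raise it when the interactive confirmation is declined. A minimal sketch of how calling code might use it — constructing `AzureSdkCredential` with default arguments and the retry hint are assumptions for illustration, not something this diff prescribes:

```python
# Illustrative sketch only: react differently to a rejected cached credential
# than to other Azure errors.
from data_safe_haven.exceptions import (
    DataSafeHavenAzureError,
    DataSafeHavenCachedCredentialError,
)
from data_safe_haven.external.api.credentials import AzureSdkCredential

try:
    # Default-argument construction is an assumption made for this example.
    credential = AzureSdkCredential().get_credential()
except DataSafeHavenCachedCredentialError:
    # The user answered "no" when asked to confirm the cached identity:
    # prompt for re-authentication instead of treating this as a hard failure.
    print("Cached credentials rejected; run 'az login' with the correct account and retry.")
except DataSafeHavenAzureError:
    # Any other problem while talking to Azure (e.g. no CLI session at all).
    raise
```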
diff --git a/data_safe_haven/external/__init__.py b/data_safe_haven/external/__init__.py index 5e46325958..d26ef75058 100644 --- a/data_safe_haven/external/__init__.py +++ b/data_safe_haven/external/__init__.py @@ -6,10 +6,10 @@ from .interface.pulumi_account import PulumiAccount __all__ = [ - "AzureSdk", "AzureContainerInstance", "AzureIPv4Range", "AzurePostgreSQLDatabase", + "AzureSdk", "GraphApi", "PulumiAccount", ] diff --git a/data_safe_haven/external/api/credentials.py b/data_safe_haven/external/api/credentials.py index bfeb9c3aeb..82e444cea5 100644 --- a/data_safe_haven/external/api/credentials.py +++ b/data_safe_haven/external/api/credentials.py @@ -18,7 +18,11 @@ from data_safe_haven import console from data_safe_haven.directories import config_dir -from data_safe_haven.exceptions import DataSafeHavenAzureError, DataSafeHavenValueError +from data_safe_haven.exceptions import ( + DataSafeHavenAzureError, + DataSafeHavenCachedCredentialError, + DataSafeHavenValueError, +) from data_safe_haven.logging import get_logger from data_safe_haven.types import AzureSdkCredentialScope @@ -28,6 +32,7 @@ class DeferredCredential(TokenCredential): tokens_: ClassVar[dict[str, AccessToken]] = {} cache_: ClassVar[set[tuple[str, str]]] = set() + name: ClassVar[str] = "Credential name" def __init__( self, @@ -66,32 +71,28 @@ def get_credential(self) -> TokenCredential: def confirm_credentials_interactive( self, - target_name: str, user_name: str, user_id: str, tenant_name: str, tenant_id: str, - ) -> None: + ) -> bool: """ Allow user to confirm that credentials are correct. Responses are cached so the user will only be prompted once per run. If 'skip_confirmation' is set, then no confirmation will be performed. - - Raises: - DataSafeHavenValueError: if the user indicates that the credentials are wrong """ if self.skip_confirmation: - return + return True if (user_id, tenant_id) in DeferredCredential.cache_: - return + return True + DeferredCredential.cache_.add((user_id, tenant_id)) - self.logger.info(f"You are logged into the [blue]{target_name}[/] as:") + self.logger.info(f"You are logged into the [blue]{self.name}[/] as:") self.logger.info(f"\tuser: [green]{user_name}[/] ({user_id})") self.logger.info(f"\ttenant: [green]{tenant_name}[/] ({tenant_id})") - if not console.confirm("Are these details correct?", default_to_yes=True): - msg = "Selected credentials are incorrect." - raise DataSafeHavenValueError(msg) + + return console.confirm("Are these details correct?", default_to_yes=True) def get_token( self, @@ -119,6 +120,8 @@ class AzureSdkCredential(DeferredCredential): Uses AzureCliCredential for authentication """ + name: ClassVar[str] = "Azure CLI" + def __init__( self, scope: AzureSdkCredentialScope = AzureSdkCredentialScope.DEFAULT, @@ -133,19 +136,22 @@ def get_credential(self) -> TokenCredential: # Confirm that these are the desired credentials try: decoded = self.decode_token(credential.get_token(*self.scopes).token) - self.confirm_credentials_interactive( - "Azure CLI", - user_name=decoded["name"], - user_id=decoded["oid"], - tenant_name=decoded["upn"].split("@")[1], - tenant_id=decoded["tid"], - ) except (CredentialUnavailableError, DataSafeHavenValueError) as exc: + msg = "Error getting account information from Azure CLI." 
+ raise DataSafeHavenAzureError(msg) from exc + + if not self.confirm_credentials_interactive( + user_name=decoded["name"], + user_id=decoded["oid"], + tenant_name=decoded["upn"].split("@")[1], + tenant_id=decoded["tid"], + ): self.logger.error( "Please authenticate with Azure: run '[green]az login[/]' using [bold]infrastructure administrator[/] credentials." ) - msg = "Error getting account information from Azure CLI." - raise DataSafeHavenAzureError(msg) from exc + msg = "Selected credentials are incorrect." + raise DataSafeHavenCachedCredentialError(msg) + return credential @@ -156,6 +162,8 @@ class GraphApiCredential(DeferredCredential): Uses DeviceCodeCredential for authentication """ + name: ClassVar[str] = "Microsoft Graph API" + def __init__( self, tenant_id: str, @@ -214,13 +222,17 @@ def callback(verification_uri: str, user_code: str, _: datetime) -> None: raise DataSafeHavenAzureError(msg) from exc # Confirm that these are the desired credentials - self.confirm_credentials_interactive( - "Microsoft Graph API", + if not self.confirm_credentials_interactive( user_name=new_auth_record.username, user_id=new_auth_record._home_account_id.split(".")[0], tenant_name=new_auth_record._username.split("@")[1], tenant_id=new_auth_record._tenant_id, - ) + ): + self.logger.error( + f"Delete the cached credential file [green]{authentication_record_path}[/] and rerun dsh to authenticate with {self.name}" + ) + msg = "Selected credentials are incorrect." + raise DataSafeHavenCachedCredentialError(msg) # Return the credential return credential diff --git a/data_safe_haven/external/api/graph_api.py b/data_safe_haven/external/api/graph_api.py index 7d3b088672..d77e78120d 100644 --- a/data_safe_haven/external/api/graph_api.py +++ b/data_safe_haven/external/api/graph_api.py @@ -837,7 +837,10 @@ def read_applications(self) -> Sequence[dict[str, Any]]: "value" ] ] - except Exception as exc: + except ( + DataSafeHavenMicrosoftGraphError, + requests.JSONDecodeError, + ) as exc: msg = "Could not load list of applications." 
raise DataSafeHavenMicrosoftGraphError(msg) from exc diff --git a/data_safe_haven/infrastructure/common/__init__.py b/data_safe_haven/infrastructure/common/__init__.py index 6106cac731..85184d6574 100644 --- a/data_safe_haven/infrastructure/common/__init__.py +++ b/data_safe_haven/infrastructure/common/__init__.py @@ -16,6 +16,8 @@ __all__ = [ "DockerHubCredentials", + "SREDnsIpRanges", + "SREIpRanges", "get_address_prefixes_from_subnet", "get_available_ips_from_subnet", "get_id_from_rg", @@ -27,6 +29,4 @@ "get_name_from_subnet", "get_name_from_vnet", "get_subscription_id_from_rg", - "SREDnsIpRanges", - "SREIpRanges", ] diff --git a/data_safe_haven/infrastructure/components/__init__.py b/data_safe_haven/infrastructure/components/__init__.py index 2b3dd67e7a..52043d1ad3 100644 --- a/data_safe_haven/infrastructure/components/__init__.py +++ b/data_safe_haven/infrastructure/components/__init__.py @@ -9,6 +9,8 @@ MicrosoftSQLDatabaseProps, NFSV3BlobContainerComponent, NFSV3BlobContainerProps, + NFSV3StorageAccountComponent, + NFSV3StorageAccountProps, PostgresqlDatabaseComponent, PostgresqlDatabaseProps, VMComponent, @@ -23,7 +25,6 @@ ) from .wrapped import ( WrappedLogAnalyticsWorkspace, - WrappedNFSV3StorageAccount, ) __all__ = [ @@ -41,7 +42,8 @@ "MicrosoftSQLDatabaseProps", "NFSV3BlobContainerComponent", "NFSV3BlobContainerProps", - "WrappedNFSV3StorageAccount", + "NFSV3StorageAccountComponent", + "NFSV3StorageAccountProps", "PostgresqlDatabaseComponent", "PostgresqlDatabaseProps", "SSLCertificate", diff --git a/data_safe_haven/infrastructure/components/composite/__init__.py b/data_safe_haven/infrastructure/components/composite/__init__.py index bc09bc18a8..8e561dd73a 100644 --- a/data_safe_haven/infrastructure/components/composite/__init__.py +++ b/data_safe_haven/infrastructure/components/composite/__init__.py @@ -9,6 +9,10 @@ MicrosoftSQLDatabaseProps, ) from .nfsv3_blob_container import NFSV3BlobContainerComponent, NFSV3BlobContainerProps +from .nfsv3_storage_account import ( + NFSV3StorageAccountComponent, + NFSV3StorageAccountProps, +) from .postgresql_database import PostgresqlDatabaseComponent, PostgresqlDatabaseProps from .virtual_machine import LinuxVMComponentProps, VMComponent @@ -23,6 +27,8 @@ "MicrosoftSQLDatabaseProps", "NFSV3BlobContainerComponent", "NFSV3BlobContainerProps", + "NFSV3StorageAccountComponent", + "NFSV3StorageAccountProps", "PostgresqlDatabaseComponent", "PostgresqlDatabaseProps", "VMComponent", diff --git a/data_safe_haven/infrastructure/components/composite/nfsv3_blob_container.py b/data_safe_haven/infrastructure/components/composite/nfsv3_blob_container.py index 98564918a0..29550e9541 100644 --- a/data_safe_haven/infrastructure/components/composite/nfsv3_blob_container.py +++ b/data_safe_haven/infrastructure/components/composite/nfsv3_blob_container.py @@ -1,7 +1,7 @@ from pulumi import ComponentResource, Input, ResourceOptions from pulumi_azure_native import storage -from data_safe_haven.infrastructure.components.dynamic.blob_container_acl import ( +from data_safe_haven.infrastructure.components.dynamic import ( BlobContainerAcl, BlobContainerAclProps, ) @@ -52,6 +52,7 @@ def __init__( ResourceOptions(parent=props.storage_account), ), ) + BlobContainerAcl( f"{storage_container._name}_acl", BlobContainerAclProps( diff --git a/data_safe_haven/infrastructure/components/composite/nfsv3_storage_account.py b/data_safe_haven/infrastructure/components/composite/nfsv3_storage_account.py new file mode 100644 index 0000000000..59a2f4e50b --- /dev/null +++ 
b/data_safe_haven/infrastructure/components/composite/nfsv3_storage_account.py @@ -0,0 +1,136 @@ +from collections.abc import Mapping, Sequence + +from pulumi import ComponentResource, Input, Output, ResourceOptions +from pulumi_azure_native import insights, storage + +from data_safe_haven.external import AzureIPv4Range +from data_safe_haven.infrastructure.components.wrapped import ( + WrappedLogAnalyticsWorkspace, +) + + +class NFSV3StorageAccountProps: + def __init__( + self, + account_name: Input[str], + allowed_ip_addresses: Input[Sequence[str]] | None, + location: Input[str], + log_analytics_workspace: Input[WrappedLogAnalyticsWorkspace], + resource_group_name: Input[str], + subnet_id: Input[str], + ): + self.account_name = account_name + self.allowed_ip_addresses = allowed_ip_addresses + self.location = location + self.log_analytics_workspace = log_analytics_workspace + self.resource_group_name = resource_group_name + self.subnet_id = subnet_id + + +class NFSV3StorageAccountComponent(ComponentResource): + encryption_args = storage.EncryptionArgs( + key_source=storage.KeySource.MICROSOFT_STORAGE, + services=storage.EncryptionServicesArgs( + blob=storage.EncryptionServiceArgs( + enabled=True, key_type=storage.KeyType.ACCOUNT + ), + file=storage.EncryptionServiceArgs( + enabled=True, key_type=storage.KeyType.ACCOUNT + ), + ), + ) + + def __init__( + self, + name: str, + props: NFSV3StorageAccountProps, + opts: ResourceOptions | None = None, + tags: Input[Mapping[str, Input[str]]] | None = None, + ): + super().__init__("dsh:sre:NFSV3StorageAccountComponent", name, {}, opts) + child_opts = ResourceOptions.merge(opts, ResourceOptions(parent=self)) + child_tags = {"component": "data"} | (tags if tags else {}) + + ip_rules = Output.from_input(props.allowed_ip_addresses).apply( + lambda ip_ranges: [ + storage.IPRuleArgs( + action=storage.Action.ALLOW, + i_p_address_or_range=str(ip_address), + ) + for ip_range in sorted(ip_ranges) + for ip_address in AzureIPv4Range.from_cidr(ip_range).all_ips() + ] + ) + + # Deploy storage account + self.storage_account = storage.StorageAccount( + f"{self._name}", + account_name=props.account_name, + allow_blob_public_access=False, + enable_https_traffic_only=True, + enable_nfs_v3=True, + encryption=self.encryption_args, + is_hns_enabled=True, + kind=storage.Kind.BLOCK_BLOB_STORAGE, + location=props.location, + minimum_tls_version=storage.MinimumTlsVersion.TLS1_2, + network_rule_set=storage.NetworkRuleSetArgs( + bypass=storage.Bypass.AZURE_SERVICES, + default_action=storage.DefaultAction.DENY, + ip_rules=ip_rules, + virtual_network_rules=[ + storage.VirtualNetworkRuleArgs( + virtual_network_resource_id=props.subnet_id, + ) + ], + ), + public_network_access=storage.PublicNetworkAccess.ENABLED, + resource_group_name=props.resource_group_name, + sku=storage.SkuArgs(name=storage.SkuName.PREMIUM_ZRS), + opts=child_opts, + tags=child_tags, + ) + + # Add diagnostic setting for blobs + insights.DiagnosticSetting( + f"{self.storage_account._name}_diagnostic_setting", + name=f"{self.storage_account._name}_diagnostic_setting", + log_analytics_destination_type="Dedicated", + logs=[ + { + "category_group": "allLogs", + "enabled": True, + "retention_policy": { + "days": 0, + "enabled": False, + }, + }, + { + "category_group": "audit", + "enabled": True, + "retention_policy": { + "days": 0, + "enabled": False, + }, + }, + ], + metrics=[ + { + "category": "Transaction", + "enabled": True, + "retention_policy": { + "days": 0, + "enabled": False, + }, + } + ], + 
resource_uri=self.storage_account.id.apply( + # This is the URI of the blobServices resource which is automatically + # created. + lambda resource_id: resource_id + + "/blobServices/default" + ), + workspace_id=props.log_analytics_workspace.id, + ) + + self.register_outputs({}) diff --git a/data_safe_haven/infrastructure/components/wrapped/__init__.py b/data_safe_haven/infrastructure/components/wrapped/__init__.py index ef6e7374d2..fc5f8c8f61 100644 --- a/data_safe_haven/infrastructure/components/wrapped/__init__.py +++ b/data_safe_haven/infrastructure/components/wrapped/__init__.py @@ -1,7 +1,5 @@ from .log_analytics_workspace import WrappedLogAnalyticsWorkspace -from .nfsv3_storage_account import WrappedNFSV3StorageAccount __all__ = [ - "WrappedNFSV3StorageAccount", "WrappedLogAnalyticsWorkspace", ] diff --git a/data_safe_haven/infrastructure/components/wrapped/nfsv3_storage_account.py b/data_safe_haven/infrastructure/components/wrapped/nfsv3_storage_account.py deleted file mode 100644 index e259de4806..0000000000 --- a/data_safe_haven/infrastructure/components/wrapped/nfsv3_storage_account.py +++ /dev/null @@ -1,79 +0,0 @@ -from collections.abc import Mapping, Sequence - -from pulumi import Input, Output, ResourceOptions -from pulumi_azure_native import storage - -from data_safe_haven.external import AzureIPv4Range -from data_safe_haven.types import AzureServiceTag - - -class WrappedNFSV3StorageAccount(storage.StorageAccount): - encryption_args = storage.EncryptionArgs( - key_source=storage.KeySource.MICROSOFT_STORAGE, - services=storage.EncryptionServicesArgs( - blob=storage.EncryptionServiceArgs( - enabled=True, key_type=storage.KeyType.ACCOUNT - ), - file=storage.EncryptionServiceArgs( - enabled=True, key_type=storage.KeyType.ACCOUNT - ), - ), - ) - - def __init__( - self, - resource_name: str, - *, - account_name: Input[str], - allowed_ip_addresses: Input[Sequence[str]] | None, - allowed_service_tag: AzureServiceTag | None, - location: Input[str], - resource_group_name: Input[str], - subnet_id: Input[str], - opts: ResourceOptions, - tags: Input[Mapping[str, Input[str]]], - ): - if allowed_service_tag == AzureServiceTag.INTERNET: - default_action = storage.DefaultAction.ALLOW - ip_rules = [] - else: - default_action = storage.DefaultAction.DENY - ip_rules = Output.from_input(allowed_ip_addresses).apply( - lambda ip_ranges: [ - storage.IPRuleArgs( - action=storage.Action.ALLOW, - i_p_address_or_range=str(ip_address), - ) - for ip_range in sorted(ip_ranges) - for ip_address in AzureIPv4Range.from_cidr(ip_range).all_ips() - ] - ) - - self.resource_group_name_ = Output.from_input(resource_group_name) - super().__init__( - resource_name, - account_name=account_name, - allow_blob_public_access=False, - enable_https_traffic_only=True, - enable_nfs_v3=True, - encryption=self.encryption_args, - is_hns_enabled=True, - kind=storage.Kind.BLOCK_BLOB_STORAGE, - location=location, - minimum_tls_version=storage.MinimumTlsVersion.TLS1_2, - network_rule_set=storage.NetworkRuleSetArgs( - bypass=storage.Bypass.AZURE_SERVICES, - default_action=default_action, - ip_rules=ip_rules, - virtual_network_rules=[ - storage.VirtualNetworkRuleArgs( - virtual_network_resource_id=subnet_id, - ) - ], - ), - public_network_access=storage.PublicNetworkAccess.ENABLED, - resource_group_name=resource_group_name, - sku=storage.SkuArgs(name=storage.SkuName.PREMIUM_ZRS), - opts=opts, - tags=tags, - ) diff --git a/data_safe_haven/infrastructure/programs/declarative_sre.py 
b/data_safe_haven/infrastructure/programs/declarative_sre.py index 15989bbe7b..2228078c36 100644 --- a/data_safe_haven/infrastructure/programs/declarative_sre.py +++ b/data_safe_haven/infrastructure/programs/declarative_sre.py @@ -163,12 +163,27 @@ def __call__(self) -> None: ), ) + # Deploy monitoring + monitoring = SREMonitoringComponent( + "sre_monitoring", + self.stack_name, + SREMonitoringProps( + dns_private_zones=dns.private_zones, + location=self.config.azure.location, + resource_group_name=resource_group.name, + subnet=networking.subnet_monitoring, + timezone=self.config.sre.timezone, + ), + tags=self.tags, + ) + # Deploy SRE firewall SREFirewallComponent( "sre_firewall", self.stack_name, SREFirewallProps( location=self.config.azure.location, + log_analytics_workspace=monitoring.log_analytics, resource_group_name=resource_group.name, route_table_name=networking.route_table_name, subnet_apt_proxy_server=networking.subnet_apt_proxy_server, @@ -196,6 +211,7 @@ def __call__(self) -> None: dns_record=networking.shm_ns_record, dns_server_admin_password=dns.password_admin, location=self.config.azure.location, + log_analytics_workspace=monitoring.log_analytics, resource_group=resource_group, sre_fqdn=networking.sre_fqdn, storage_quota_gb_home=self.config.sre.storage_quota_gb.home, @@ -217,6 +233,7 @@ def __call__(self) -> None: containers_subnet=networking.subnet_apt_proxy_server, dns_server_ip=dns.ip_address, location=self.config.azure.location, + log_analytics_workspace=monitoring.log_analytics, resource_group_name=resource_group.name, sre_fqdn=networking.sre_fqdn, storage_account_key=data.storage_account_data_configuration_key, @@ -233,6 +250,7 @@ def __call__(self) -> None: dns_server_ip=dns.ip_address, dockerhub_credentials=dockerhub_credentials, location=self.config.azure.location, + log_analytics_workspace=monitoring.log_analytics, resource_group_name=resource_group.name, sre_fqdn=networking.sre_fqdn, storage_account_key=data.storage_account_data_configuration_key, @@ -253,6 +271,7 @@ def __call__(self) -> None: entra_application_secret=entra.identity_application_secret, entra_tenant_id=shm_entra_tenant_id, location=self.config.azure.location, + log_analytics_workspace=monitoring.log_analytics, resource_group_name=resource_group.name, shm_fqdn=shm_fqdn, sre_fqdn=networking.sre_fqdn, @@ -299,6 +318,7 @@ def __call__(self) -> None: ldap_user_filter=ldap_user_filter, ldap_user_search_base=ldap_user_search_base, location=self.config.azure.location, + log_analytics_workspace=monitoring.log_analytics, resource_group_name=resource_group.name, storage_account_key=data.storage_account_data_configuration_key, storage_account_name=data.storage_account_data_configuration_name, @@ -325,6 +345,7 @@ def __call__(self) -> None: ldap_username_attribute=ldap_username_attribute, ldap_user_search_base=ldap_user_search_base, location=self.config.azure.location, + log_analytics_workspace=monitoring.log_analytics, nexus_admin_password=data.password_nexus_admin, resource_group_name=resource_group.name, software_packages=self.config.sre.software_packages, @@ -339,20 +360,6 @@ def __call__(self) -> None: tags=self.tags, ) - # Deploy monitoring - monitoring = SREMonitoringComponent( - "sre_monitoring", - self.stack_name, - SREMonitoringProps( - dns_private_zones=dns.private_zones, - location=self.config.azure.location, - resource_group_name=resource_group.name, - subnet=networking.subnet_monitoring, - timezone=self.config.sre.timezone, - ), - tags=self.tags, - ) - # Deploy desired state desired_state = 
SREDesiredStateComponent( "sre_desired_state", @@ -371,6 +378,7 @@ def __call__(self) -> None: ldap_user_filter=ldap_user_filter, ldap_user_search_base=ldap_user_search_base, location=self.config.azure.location, + log_analytics_workspace=monitoring.log_analytics, resource_group=resource_group, software_repository_hostname=user_services.software_repositories.hostname, subnet_desired_state=networking.subnet_desired_state, @@ -420,4 +428,5 @@ def __call__(self) -> None: pulumi.export("data", data.exports) pulumi.export("ldap", ldap_group_names) pulumi.export("remote_desktop", remote_desktop.exports) + pulumi.export("sre_fqdn", networking.sre_fqdn) pulumi.export("workspaces", workspaces.exports) diff --git a/data_safe_haven/infrastructure/programs/sre/apt_proxy_server.py b/data_safe_haven/infrastructure/programs/sre/apt_proxy_server.py index ff1cb4b0da..d58a17a6de 100644 --- a/data_safe_haven/infrastructure/programs/sre/apt_proxy_server.py +++ b/data_safe_haven/infrastructure/programs/sre/apt_proxy_server.py @@ -12,6 +12,7 @@ FileShareFileProps, LocalDnsRecordComponent, LocalDnsRecordProps, + WrappedLogAnalyticsWorkspace, ) from data_safe_haven.types import PermittedDomains @@ -24,6 +25,7 @@ def __init__( containers_subnet: Input[str], dns_server_ip: Input[str], location: Input[str], + log_analytics_workspace: Input[WrappedLogAnalyticsWorkspace], resource_group_name: Input[str], sre_fqdn: Input[str], storage_account_key: Input[str], @@ -34,6 +36,7 @@ def __init__( ) self.dns_server_ip = dns_server_ip self.location = location + self.log_analytics_workspace = log_analytics_workspace self.resource_group_name = resource_group_name self.sre_fqdn = sre_fqdn self.storage_account_key = storage_account_key @@ -119,6 +122,12 @@ def __init__( ], ), ], + diagnostics=containerinstance.ContainerGroupDiagnosticsArgs( + log_analytics=containerinstance.LogAnalyticsArgs( + workspace_id=props.log_analytics_workspace.workspace_id, + workspace_key=props.log_analytics_workspace.workspace_key, + ), + ), dns_config=containerinstance.DnsConfigurationArgs( name_servers=[props.dns_server_ip], ), diff --git a/data_safe_haven/infrastructure/programs/sre/clamav_mirror.py b/data_safe_haven/infrastructure/programs/sre/clamav_mirror.py index 203334a21b..e6f81df6cb 100644 --- a/data_safe_haven/infrastructure/programs/sre/clamav_mirror.py +++ b/data_safe_haven/infrastructure/programs/sre/clamav_mirror.py @@ -11,6 +11,7 @@ from data_safe_haven.infrastructure.components import ( LocalDnsRecordComponent, LocalDnsRecordProps, + WrappedLogAnalyticsWorkspace, ) @@ -22,6 +23,7 @@ def __init__( dns_server_ip: Input[str], dockerhub_credentials: DockerHubCredentials, location: Input[str], + log_analytics_workspace: Input[WrappedLogAnalyticsWorkspace], resource_group_name: Input[str], sre_fqdn: Input[str], storage_account_key: Input[str], @@ -31,6 +33,7 @@ def __init__( self.dns_server_ip = dns_server_ip self.dockerhub_credentials = dockerhub_credentials self.location = location + self.log_analytics_workspace = log_analytics_workspace self.resource_group_name = resource_group_name self.sre_fqdn = sre_fqdn self.storage_account_key = storage_account_key @@ -95,6 +98,12 @@ def __init__( ], ), ], + diagnostics=containerinstance.ContainerGroupDiagnosticsArgs( + log_analytics=containerinstance.LogAnalyticsArgs( + workspace_id=props.log_analytics_workspace.workspace_id, + workspace_key=props.log_analytics_workspace.workspace_key, + ), + ), dns_config=containerinstance.DnsConfigurationArgs( name_servers=[props.dns_server_ip], ), diff --git 
a/data_safe_haven/infrastructure/programs/sre/data.py b/data_safe_haven/infrastructure/programs/sre/data.py index 711b76139f..10732670f5 100644 --- a/data_safe_haven/infrastructure/programs/sre/data.py +++ b/data_safe_haven/infrastructure/programs/sre/data.py @@ -7,6 +7,7 @@ from pulumi import ComponentResource, Input, Output, ResourceOptions from pulumi_azure_native import ( authorization, + insights, keyvault, managedidentity, network, @@ -31,11 +32,13 @@ from data_safe_haven.infrastructure.components import ( NFSV3BlobContainerComponent, NFSV3BlobContainerProps, + NFSV3StorageAccountComponent, + NFSV3StorageAccountProps, SSLCertificate, SSLCertificateProps, - WrappedNFSV3StorageAccount, + WrappedLogAnalyticsWorkspace, ) -from data_safe_haven.types import AzureDnsZoneNames, AzureServiceTag +from data_safe_haven.types import AzureDnsZoneNames class SREDataProps: @@ -46,11 +49,12 @@ def __init__( admin_email_address: Input[str], admin_group_id: Input[str], admin_ip_addresses: Input[Sequence[str]], - data_provider_ip_addresses: Input[list[str]] | AzureServiceTag, + data_provider_ip_addresses: Input[list[str]], dns_private_zones: Input[dict[str, network.PrivateZone]], dns_record: Input[network.RecordSet], dns_server_admin_password: Input[pulumi_random.RandomPassword], location: Input[str], + log_analytics_workspace: Input[WrappedLogAnalyticsWorkspace], resource_group: Input[resources.ResourceGroup], sre_fqdn: Input[str], storage_quota_gb_home: Input[int], @@ -69,6 +73,7 @@ def __init__( self.dns_record = dns_record self.password_dns_server_admin = dns_server_admin_password self.location = location + self.log_analytics_workspace = log_analytics_workspace self.resource_group_id = Output.from_input(resource_group).apply(get_id_from_rg) self.resource_group_name = Output.from_input(resource_group).apply( get_name_from_rg @@ -106,18 +111,13 @@ def __init__( child_opts = ResourceOptions.merge(opts, ResourceOptions(parent=self)) child_tags = {"component": "data"} | (tags if tags else {}) - if isinstance(props.data_provider_ip_addresses, list): - data_private_sensitive_service_tag = None - data_private_sensitive_ip_addresses = Output.all( - props.data_configuration_ip_addresses, props.data_provider_ip_addresses - ).apply( - lambda address_lists: { - ip for address_list in address_lists for ip in address_list - } - ) - else: - data_private_sensitive_ip_addresses = None - data_private_sensitive_service_tag = props.data_provider_ip_addresses + data_private_sensitive_ip_addresses = Output.all( + props.data_configuration_ip_addresses, props.data_provider_ip_addresses + ).apply( + lambda address_lists: { + ip for address_list in address_lists for ip in address_list + } + ) # Define Key Vault reader identity_key_vault_reader = managedidentity.UserAssignedIdentity( @@ -421,6 +421,45 @@ def __init__( resource_group_name=kwargs["resource_group_name"], ) ) + # Add diagnostic setting for files + insights.DiagnosticSetting( + f"{storage_account_data_configuration._name}_diagnostic_setting", + name=f"{storage_account_data_configuration._name}_diagnostic_setting", + log_analytics_destination_type="Dedicated", + logs=[ + { + "category_group": "allLogs", + "enabled": True, + "retention_policy": { + "days": 0, + "enabled": False, + }, + }, + { + "category_group": "audit", + "enabled": True, + "retention_policy": { + "days": 0, + "enabled": False, + }, + }, + ], + metrics=[ + { + "category": "Transaction", + "enabled": True, + "retention_policy": { + "days": 0, + "enabled": False, + }, + } + ], + # This is the URI of 
the automatically created fileService resource + resource_uri=Output.concat( + storage_account_data_configuration.id, "/fileServices/default" + ), + workspace_id=props.log_analytics_workspace.id, + ) # Set up a private endpoint for the configuration data storage account storage_account_data_configuration_private_endpoint = network.PrivateEndpoint( f"{storage_account_data_configuration._name}_private_endpoint", @@ -467,20 +506,25 @@ def __init__( # Deploy sensitive data blob storage account # - This holds the /mnt/input and /mnt/output containers that are mounted by workspaces # - Azure blobs have worse NFS support but can be accessed with Azure Storage Explorer - storage_account_data_private_sensitive = WrappedNFSV3StorageAccount( + component_data_private_sensitive = NFSV3StorageAccountComponent( f"{self._name}_storage_account_data_private_sensitive", - # Storage account names have a maximum of 24 characters - account_name=alphanumeric( - f"{''.join(truncate_tokens(stack_name.split('-'), 11))}sensitivedata{sha256hash(self._name)}" - )[:24], - allowed_ip_addresses=data_private_sensitive_ip_addresses, - allowed_service_tag=data_private_sensitive_service_tag, - location=props.location, - subnet_id=props.subnet_data_private_id, - resource_group_name=props.resource_group_name, + NFSV3StorageAccountProps( + # Storage account names have a maximum of 24 characters + account_name=alphanumeric( + f"{''.join(truncate_tokens(stack_name.split('-'), 11))}sensitivedata{sha256hash(self._name)}" + )[:24], + allowed_ip_addresses=data_private_sensitive_ip_addresses, + location=props.location, + log_analytics_workspace=props.log_analytics_workspace, + subnet_id=props.subnet_data_private_id, + resource_group_name=props.resource_group_name, + ), opts=child_opts, tags=child_tags, ) + storage_account_data_private_sensitive = ( + component_data_private_sensitive.storage_account + ) # Deploy storage containers NFSV3BlobContainerComponent( f"{self._name}_blob_egress", @@ -615,6 +659,45 @@ def __init__( opts=child_opts, tags=child_tags, ) + # Add diagnostic setting for files + insights.DiagnosticSetting( + f"{storage_account_data_private_user._name}_diagnostic_setting", + name=f"{storage_account_data_private_user._name}_diagnostic_setting", + log_analytics_destination_type="Dedicated", + logs=[ + { + "category_group": "allLogs", + "enabled": True, + "retention_policy": { + "days": 0, + "enabled": False, + }, + }, + { + "category_group": "audit", + "enabled": True, + "retention_policy": { + "days": 0, + "enabled": False, + }, + }, + ], + metrics=[ + { + "category": "Transaction", + "enabled": True, + "retention_policy": { + "days": 0, + "enabled": False, + }, + } + ], + # This is the URI of the automatically created fileService resource + resource_uri=Output.concat( + storage_account_data_private_user.id, "/fileServices/default" + ), + workspace_id=props.log_analytics_workspace.id, + ) storage.FileShare( f"{storage_account_data_private_user._name}_files_home", access_tier=storage.ShareAccessTier.PREMIUM, diff --git a/data_safe_haven/infrastructure/programs/sre/desired_state.py b/data_safe_haven/infrastructure/programs/sre/desired_state.py index c4392f5210..7b638502a0 100644 --- a/data_safe_haven/infrastructure/programs/sre/desired_state.py +++ b/data_safe_haven/infrastructure/programs/sre/desired_state.py @@ -31,7 +31,9 @@ from data_safe_haven.infrastructure.components import ( NFSV3BlobContainerComponent, NFSV3BlobContainerProps, - WrappedNFSV3StorageAccount, + NFSV3StorageAccountComponent, + 
NFSV3StorageAccountProps, + WrappedLogAnalyticsWorkspace, ) from data_safe_haven.resources import resources_path from data_safe_haven.types import AzureDnsZoneNames @@ -55,6 +57,7 @@ def __init__( ldap_user_filter: Input[str], ldap_user_search_base: Input[str], location: Input[str], + log_analytics_workspace: Input[WrappedLogAnalyticsWorkspace], resource_group: Input[resources.ResourceGroup], software_repository_hostname: Input[str], subscription_name: Input[str], @@ -73,6 +76,7 @@ def __init__( self.ldap_user_filter = ldap_user_filter self.ldap_user_search_base = ldap_user_search_base self.location = location + self.log_analytics_workspace = log_analytics_workspace self.resource_group_id = Output.from_input(resource_group).apply(get_id_from_rg) self.resource_group_name = Output.from_input(resource_group).apply( get_name_from_rg @@ -102,19 +106,22 @@ def __init__( # Deploy desired state storage account # - This holds the /var/local/ansible container that is mounted by workspaces # - Azure blobs have worse NFS support but can be accessed with Azure Storage Explorer - storage_account = WrappedNFSV3StorageAccount( + storage_component = NFSV3StorageAccountComponent( f"{self._name}_storage_account", - account_name=alphanumeric( - f"{''.join(truncate_tokens(stack_name.split('-'), 11))}desiredstate{sha256hash(self._name)}" - )[:24], - allowed_ip_addresses=props.admin_ip_addresses, - allowed_service_tag=None, - location=props.location, - resource_group_name=props.resource_group_name, - subnet_id=props.subnet_desired_state_id, + NFSV3StorageAccountProps( + account_name=alphanumeric( + f"{''.join(truncate_tokens(stack_name.split('-'), 11))}desiredstate{sha256hash(self._name)}" + )[:24], + allowed_ip_addresses=props.admin_ip_addresses, + location=props.location, + log_analytics_workspace=props.log_analytics_workspace, + resource_group_name=props.resource_group_name, + subnet_id=props.subnet_desired_state_id, + ), opts=child_opts, tags=child_tags, ) + storage_account = storage_component.storage_account # Deploy desired state share container_desired_state = NFSV3BlobContainerComponent( f"{self._name}_blob_desired_state", diff --git a/data_safe_haven/infrastructure/programs/sre/firewall.py b/data_safe_haven/infrastructure/programs/sre/firewall.py index 97f7a885b7..ed831e826a 100644 --- a/data_safe_haven/infrastructure/programs/sre/firewall.py +++ b/data_safe_haven/infrastructure/programs/sre/firewall.py @@ -3,12 +3,13 @@ from collections.abc import Mapping from pulumi import ComponentResource, Input, Output, ResourceOptions -from pulumi_azure_native import network +from pulumi_azure_native import insights, network from data_safe_haven.infrastructure.common import ( get_address_prefixes_from_subnet, get_id_from_subnet, ) +from data_safe_haven.infrastructure.components import WrappedLogAnalyticsWorkspace from data_safe_haven.types import ( FirewallPriorities, ForbiddenDomains, @@ -23,6 +24,7 @@ class SREFirewallProps: def __init__( self, location: Input[str], + log_analytics_workspace: Input[WrappedLogAnalyticsWorkspace], resource_group_name: Input[str], route_table_name: Input[str], subnet_apt_proxy_server: Input[network.GetSubnetResult], @@ -35,6 +37,7 @@ def __init__( subnet_workspaces: Input[network.GetSubnetResult], ) -> None: self.location = location + self.log_analytics_workspace = log_analytics_workspace self.resource_group_name = resource_group_name self.route_table_name = route_table_name self.subnet_apt_proxy_server_prefixes = Output.from_input( @@ -331,6 +334,36 @@ def __init__( 
tags=child_tags, ) + # Add diagnostic settings for firewall + # This links the firewall to the log analytics workspace + insights.DiagnosticSetting( + f"{self._name}_firewall_diagnostic_settings", + name="firewall_diagnostic_settings", + log_analytics_destination_type="Dedicated", + logs=[ + { + "category_group": "allLogs", + "enabled": True, + "retention_policy": { + "days": 0, + "enabled": False, + }, + }, + ], + metrics=[ + { + "category": "AllMetrics", + "enabled": True, + "retention_policy": { + "days": 0, + "enabled": False, + }, + } + ], + resource_uri=firewall.id, + workspace_id=props.log_analytics_workspace.id, + ) + # Retrieve the private IP address for the firewall private_ip_address = firewall.ip_configurations.apply( lambda cfgs: "" if not cfgs else cfgs[0].private_ip_address diff --git a/data_safe_haven/infrastructure/programs/sre/gitea_server.py b/data_safe_haven/infrastructure/programs/sre/gitea_server.py index ab85ee51d8..2690de9c79 100644 --- a/data_safe_haven/infrastructure/programs/sre/gitea_server.py +++ b/data_safe_haven/infrastructure/programs/sre/gitea_server.py @@ -14,6 +14,7 @@ LocalDnsRecordProps, PostgresqlDatabaseComponent, PostgresqlDatabaseProps, + WrappedLogAnalyticsWorkspace, ) from data_safe_haven.resources import resources_path from data_safe_haven.utility import FileReader @@ -35,6 +36,7 @@ def __init__( ldap_user_filter: Input[str], ldap_user_search_base: Input[str], location: Input[str], + log_analytics_workspace: Input[WrappedLogAnalyticsWorkspace], resource_group_name: Input[str], sre_fqdn: Input[str], storage_account_key: Input[str], @@ -55,6 +57,7 @@ def __init__( self.ldap_user_filter = ldap_user_filter self.ldap_user_search_base = ldap_user_search_base self.location = location + self.log_analytics_workspace = log_analytics_workspace self.resource_group_name = resource_group_name self.sre_fqdn = sre_fqdn self.storage_account_key = storage_account_key @@ -272,6 +275,12 @@ def __init__( ], ), ], + diagnostics=containerinstance.ContainerGroupDiagnosticsArgs( + log_analytics=containerinstance.LogAnalyticsArgs( + workspace_id=props.log_analytics_workspace.workspace_id, + workspace_key=props.log_analytics_workspace.workspace_key, + ), + ), dns_config=containerinstance.DnsConfigurationArgs( name_servers=[props.dns_server_ip], ), diff --git a/data_safe_haven/infrastructure/programs/sre/hedgedoc_server.py b/data_safe_haven/infrastructure/programs/sre/hedgedoc_server.py index 24cb858e68..d35efa81c5 100644 --- a/data_safe_haven/infrastructure/programs/sre/hedgedoc_server.py +++ b/data_safe_haven/infrastructure/programs/sre/hedgedoc_server.py @@ -15,6 +15,7 @@ LocalDnsRecordProps, PostgresqlDatabaseComponent, PostgresqlDatabaseProps, + WrappedLogAnalyticsWorkspace, ) from data_safe_haven.resources import resources_path from data_safe_haven.types import Ports @@ -37,6 +38,7 @@ def __init__( ldap_user_search_base: Input[str], ldap_username_attribute: Input[str], location: Input[str], + log_analytics_workspace: Input[WrappedLogAnalyticsWorkspace], resource_group_name: Input[str], sre_fqdn: Input[str], storage_account_key: Input[str], @@ -58,6 +60,7 @@ def __init__( self.ldap_user_search_base = ldap_user_search_base self.ldap_username_attribute = ldap_username_attribute self.location = location + self.log_analytics_workspace = log_analytics_workspace self.resource_group_name = resource_group_name self.sre_fqdn = sre_fqdn self.storage_account_key = storage_account_key @@ -253,6 +256,12 @@ def __init__( ], ), ], + 
diagnostics=containerinstance.ContainerGroupDiagnosticsArgs( + log_analytics=containerinstance.LogAnalyticsArgs( + workspace_id=props.log_analytics_workspace.workspace_id, + workspace_key=props.log_analytics_workspace.workspace_key, + ), + ), dns_config=containerinstance.DnsConfigurationArgs( name_servers=[props.dns_server_ip], ), diff --git a/data_safe_haven/infrastructure/programs/sre/identity.py b/data_safe_haven/infrastructure/programs/sre/identity.py index 7839853384..4b06420190 100644 --- a/data_safe_haven/infrastructure/programs/sre/identity.py +++ b/data_safe_haven/infrastructure/programs/sre/identity.py @@ -13,6 +13,7 @@ from data_safe_haven.infrastructure.components import ( LocalDnsRecordComponent, LocalDnsRecordProps, + WrappedLogAnalyticsWorkspace, ) @@ -27,6 +28,7 @@ def __init__( entra_application_secret: Input[str], entra_tenant_id: Input[str], location: Input[str], + log_analytics_workspace: Input[WrappedLogAnalyticsWorkspace], resource_group_name: Input[str], shm_fqdn: Input[str], sre_fqdn: Input[str], @@ -40,6 +42,7 @@ def __init__( self.entra_application_secret = entra_application_secret self.entra_tenant_id = entra_tenant_id self.location = location + self.log_analytics_workspace = log_analytics_workspace self.resource_group_name = resource_group_name self.shm_fqdn = shm_fqdn self.sre_fqdn = sre_fqdn @@ -163,6 +166,12 @@ def __init__( ], ), ], + diagnostics=containerinstance.ContainerGroupDiagnosticsArgs( + log_analytics=containerinstance.LogAnalyticsArgs( + workspace_id=props.log_analytics_workspace.workspace_id, + workspace_key=props.log_analytics_workspace.workspace_key, + ), + ), dns_config=containerinstance.DnsConfigurationArgs( name_servers=[props.dns_server_ip], ), diff --git a/data_safe_haven/infrastructure/programs/sre/remote_desktop.py b/data_safe_haven/infrastructure/programs/sre/remote_desktop.py index e2df83ede5..ba1e8b9816 100644 --- a/data_safe_haven/infrastructure/programs/sre/remote_desktop.py +++ b/data_safe_haven/infrastructure/programs/sre/remote_desktop.py @@ -15,6 +15,7 @@ FileShareFileProps, PostgresqlDatabaseComponent, PostgresqlDatabaseProps, + WrappedLogAnalyticsWorkspace, ) from data_safe_haven.resources import resources_path from data_safe_haven.utility import FileReader @@ -40,6 +41,7 @@ def __init__( ldap_user_filter: Input[str], ldap_user_search_base: Input[str], location: Input[str], + log_analytics_workspace: Input[WrappedLogAnalyticsWorkspace], resource_group_name: Input[str], storage_account_key: Input[str], storage_account_name: Input[str], @@ -65,6 +67,7 @@ def __init__( self.ldap_user_filter = ldap_user_filter self.ldap_user_search_base = ldap_user_search_base self.location = location + self.log_analytics_workspace = log_analytics_workspace self.resource_group_name = resource_group_name self.storage_account_key = storage_account_key self.storage_account_name = storage_account_name @@ -348,6 +351,12 @@ def __init__( ), ), ], + diagnostics=containerinstance.ContainerGroupDiagnosticsArgs( + log_analytics=containerinstance.LogAnalyticsArgs( + workspace_id=props.log_analytics_workspace.workspace_id, + workspace_key=props.log_analytics_workspace.workspace_key, + ), + ), dns_config=containerinstance.DnsConfigurationArgs( name_servers=[props.dns_server_ip], ), diff --git a/data_safe_haven/infrastructure/programs/sre/software_repositories.py b/data_safe_haven/infrastructure/programs/sre/software_repositories.py index 013c9ffcdd..420ca5c5a2 100644 --- a/data_safe_haven/infrastructure/programs/sre/software_repositories.py +++ 
b/data_safe_haven/infrastructure/programs/sre/software_repositories.py @@ -14,6 +14,7 @@ FileShareFileProps, LocalDnsRecordComponent, LocalDnsRecordProps, + WrappedLogAnalyticsWorkspace, ) from data_safe_haven.resources import resources_path from data_safe_haven.types import Ports, SoftwarePackageCategory @@ -28,6 +29,7 @@ def __init__( dns_server_ip: Input[str], dockerhub_credentials: DockerHubCredentials, location: Input[str], + log_analytics_workspace: Input[WrappedLogAnalyticsWorkspace], nexus_admin_password: Input[str], resource_group_name: Input[str], software_packages: SoftwarePackageCategory, @@ -39,6 +41,7 @@ def __init__( self.dns_server_ip = dns_server_ip self.dockerhub_credentials = dockerhub_credentials self.location = location + self.log_analytics_workspace = log_analytics_workspace self.nexus_admin_password = Output.secret(nexus_admin_password) self.nexus_packages: str | None = { SoftwarePackageCategory.ANY: "all", @@ -250,6 +253,12 @@ def __init__( ], ), ], + diagnostics=containerinstance.ContainerGroupDiagnosticsArgs( + log_analytics=containerinstance.LogAnalyticsArgs( + workspace_id=props.log_analytics_workspace.workspace_id, + workspace_key=props.log_analytics_workspace.workspace_key, + ), + ), dns_config=containerinstance.DnsConfigurationArgs( name_servers=[props.dns_server_ip], ), diff --git a/data_safe_haven/infrastructure/programs/sre/user_services.py b/data_safe_haven/infrastructure/programs/sre/user_services.py index 5eb04bdfbb..1418b3d11f 100644 --- a/data_safe_haven/infrastructure/programs/sre/user_services.py +++ b/data_safe_haven/infrastructure/programs/sre/user_services.py @@ -7,6 +7,7 @@ DockerHubCredentials, get_id_from_subnet, ) +from data_safe_haven.infrastructure.components import WrappedLogAnalyticsWorkspace from data_safe_haven.types import DatabaseSystem, SoftwarePackageCategory from .database_servers import SREDatabaseServerComponent, SREDatabaseServerProps @@ -35,6 +36,7 @@ def __init__( ldap_user_filter: Input[str], ldap_user_search_base: Input[str], location: Input[str], + log_analytics_workspace: Input[WrappedLogAnalyticsWorkspace], nexus_admin_password: Input[str], resource_group_name: Input[str], software_packages: SoftwarePackageCategory, @@ -58,6 +60,7 @@ def __init__( self.ldap_user_filter = ldap_user_filter self.ldap_user_search_base = ldap_user_search_base self.location = location + self.log_analytics_workspace = log_analytics_workspace self.nexus_admin_password = Output.secret(nexus_admin_password) self.resource_group_name = resource_group_name self.software_packages = software_packages @@ -109,6 +112,7 @@ def __init__( ldap_user_filter=props.ldap_user_filter, ldap_user_search_base=props.ldap_user_search_base, location=props.location, + log_analytics_workspace=props.log_analytics_workspace, resource_group_name=props.resource_group_name, sre_fqdn=props.sre_fqdn, storage_account_key=props.storage_account_key, @@ -134,6 +138,7 @@ def __init__( ldap_user_filter=props.ldap_user_filter, ldap_user_search_base=props.ldap_user_search_base, location=props.location, + log_analytics_workspace=props.log_analytics_workspace, resource_group_name=props.resource_group_name, sre_fqdn=props.sre_fqdn, storage_account_key=props.storage_account_key, @@ -151,6 +156,7 @@ def __init__( dns_server_ip=props.dns_server_ip, dockerhub_credentials=props.dockerhub_credentials, location=props.location, + log_analytics_workspace=props.log_analytics_workspace, nexus_admin_password=props.nexus_admin_password, resource_group_name=props.resource_group_name, 
sre_fqdn=props.sre_fqdn, diff --git a/data_safe_haven/types/__init__.py b/data_safe_haven/types/__init__.py index 728df06c19..bfe1f6898a 100644 --- a/data_safe_haven/types/__init__.py +++ b/data_safe_haven/types/__init__.py @@ -34,14 +34,14 @@ "AzureDnsZoneNames", "AzureLocation", "AzurePremiumFileShareSize", - "AzureServiceTag", "AzureSdkCredentialScope", + "AzureServiceTag", "AzureSubscriptionName", "AzureVmSku", "DatabaseSystem", "EmailAddress", - "EntraApplicationId", "EntraAppPermissionType", + "EntraApplicationId", "EntraGroupName", "EntraSignInAudienceType", "FirewallPriorities", diff --git a/data_safe_haven/version.py b/data_safe_haven/version.py index 0513a64c8f..6a7d91a4eb 100644 --- a/data_safe_haven/version.py +++ b/data_safe_haven/version.py @@ -1,2 +1,2 @@ -__version__ = "5.0.1" +__version__ = "5.1.0" __version_info__ = tuple(__version__.split(".")) diff --git a/docs/source/conf.py b/docs/source/conf.py index f262d36dc2..a1e2c34385 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -55,6 +55,7 @@ extensions = [ "myst_parser", "sphinx_togglebutton", + "sphinxcontrib.typer", ] # Add any paths that contain templates here, relative to this directory. diff --git a/docs/source/contributing/index.md b/docs/source/contributing/index.md index e5f99fd3a0..20c14073bf 100644 --- a/docs/source/contributing/index.md +++ b/docs/source/contributing/index.md @@ -13,7 +13,7 @@ Benjamin Walden
 Benjamin Walden 📖 🤔 🐛 📆 📓
 Brett Todd 💻 🤔
 Callum Mole 🐛 💻
-Carlos Gavidia-Calderon 🐛 🤔
+Carlos Gavidia-Calderon 🐛 🤔 📖
 Catalina Vallejos
๐Ÿ–‹ diff --git a/docs/source/deployment/deploy_shm.md b/docs/source/deployment/deploy_shm.md index b26d451bfb..ef6b7090f5 100644 --- a/docs/source/deployment/deploy_shm.md +++ b/docs/source/deployment/deploy_shm.md @@ -54,7 +54,7 @@ Before deploying the Safe Haven Management (SHM) infrastructure you need to deci Use the short name without spaces, _e.g._ **uksouth** not **UK South** ::: -Once you've decided on these, run the following command: [approx 5 minutes]: +Once you've decided on these, run the {typer}`dsh-shm-deploy` command: [approx 5 minutes]: :::{code} shell $ dsh shm deploy --entra-tenant-id YOUR_ENTRA_TENANT_ID \ diff --git a/docs/source/deployment/deploy_sre.md b/docs/source/deployment/deploy_sre.md index 7ff2a643e2..a3f220a023 100644 --- a/docs/source/deployment/deploy_sre.md +++ b/docs/source/deployment/deploy_sre.md @@ -21,6 +21,7 @@ $ az provider register --name Microsoft.Network Each project will have its own dedicated SRE. - Create a configuration file (optionally starting from one of our standard {ref}`policy_classification_sensitivity_tiers`) +- The {typer}`dsh-config-template` command provides template configuration files ::::{admonition} EITHER start from a blank template :class: dropdown note @@ -167,7 +168,7 @@ As some general recommendations, - For general purpose use, the D family gives decent performance and a good balance of CPU and memory. The [Dsv6 series](https://learn.microsoft.com/en-us/azure/virtual-machines/sizes/general-purpose/dsv6-series#sizes-in-series) is a good starting point and can be scaled from 2 CPUs and 8 GB RAM to 128 CPUs and 512 GB RAM. - - `Standard_D8s_v6` should give reasonable performance for a single concurrent user. + - `Standard_D8s_v5` should give reasonable performance for a single concurrent user. - For GPU accelerated work, the NC family provides Nvidia GPUs and a good balance of CPU and memory. In order of increasing throughput, the `NCv3` series features Nvidia V100 GPUs, the `NC_A100_v4` series features Nvidia A100 GPUs, and the `NCads_H100_v5` series features Nvidia H100 GPUs. - `Stanard_NC6s_v3` should give reasonable performance for a single concurrent user with AI/ML workloads. @@ -254,7 +255,7 @@ If you want to make changes to the config, edit this file and then run `dsh conf ## Deployment -- Deploy each SRE individually [approx 30 minutes]: +- Deploy each SRE individually using {typer}`dsh sre deploy` [approx 30 minutes]: :::{code} shell $ dsh sre deploy YOUR_SRE_NAME diff --git a/docs/source/deployment/security_checklist.md b/docs/source/deployment/security_checklist.md index 2737b1cb5c..7c6036402a 100644 --- a/docs/source/deployment/security_checklist.md +++ b/docs/source/deployment/security_checklist.md @@ -50,7 +50,7 @@ In each SRE configuration ### Accounts -[Create a user account](../management/index.md#add-users-to-the-data-safe-haven) for the research user in your SHM. +[Create a user account](../management/user.md#add-users-to-the-data-safe-haven) for the research user in your SHM. Do not register this user with any SRE yet. ## 1. Multifactor authentication and password strength diff --git a/docs/source/index.md b/docs/source/index.md index 764b06ec38..2c3be84b84 100644 --- a/docs/source/index.md +++ b/docs/source/index.md @@ -9,6 +9,7 @@ design/index.md deployment/index.md management/index.md roles/index.md +reference/index.md contributing/index.md ::: @@ -42,6 +43,8 @@ You can read them through in order or simply jump to the section that you are mo - If you want to deploy your own Data Safe Haven. 
- [**Management**](management/index.md) - If you want to manage a Data Safe Haven that you (or someone else) has deployed. +- [**CLI Reference**](reference/index.md) + - If you want details of all the Command Line Interface commands. - [**Roles**](roles/index.md) - Information about the different user roles in the Data Safe Haven. - Instructions and advice for the actions of different user roles. diff --git a/docs/source/management/data.md b/docs/source/management/data.md new file mode 100644 index 0000000000..9cacaf3806 --- /dev/null +++ b/docs/source/management/data.md @@ -0,0 +1,72 @@ +# Managing data ingress and egress + +## Data ingress + +It is the {ref}`role_data_provider_representative`'s responsibility to upload the data required by the safe haven. + +The following steps show how to generate a temporary, write-only upload token that can be securely sent to the {ref}`role_data_provider_representative`, enabling them to upload the data: + +- In the Azure portal select **Subscriptions** then navigate to the subscription containing the relevant SHM +- Search for the resource group: `shm--sre--rg`, then click through to the storage account ending with `sensitivedata` +- Browse to **{menuselection}`Settings --> Networking`** and ensure that the data provider's IP address is one of those allowed under the **Firewall** header + - If it is not listed, modify and reupload the SRE configuration and redeploy the SRE using the `dsh` CLI, as per {ref}`deploy_sre` +- Browse to **{menuselection}`Data storage --> Containers`** from the menu on the left hand side +- Click **ingress** +- Browse to **{menuselection}`Settings --> Shared access tokens`** and do the following: + - Under **Signing method**, select **User delegation key** + - Under **Permissions**, check these boxes: + - **Write** + - **List** + - Set a 24 hour time window in the **Start and expiry date/time** (or an appropriate length of time) + - Leave everything else as default and click **{guilabel}`Generate SAS token and URL`** + - Copy the **Blob SAS URL** + + ```{image} ingress_token_write_only.png + :alt: write-only SAS token + :align: center + ``` + +- Send the **Blob SAS URL** to the data provider through a secure channel +- The data provider should now be able to upload data +- Validate successful data ingress + - Browse to **{menuselection}`Data storage --> Containers`** (in the middle of the page) + - Select the **ingress** container and ensure that the uploaded files are present + +## Data egress + +```{important} +Assessment of output must be completed **before** an egress link is created. +Outputs are potentially sensitive, and so an appropriate process must be applied to ensure that they are suitable for egress. +``` + +The {ref}`role_system_manager` creates a time-limited and IP restricted link to remove data from the environment. 
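The steps below create this link through the Azure portal. Purely as a rough illustration — not part of this changeset — an equivalent read-only, time-limited user-delegation SAS URL for the **egress** container could be generated in Python; this is a minimal sketch assuming the `azure-identity` and `azure-storage-blob` packages, with a placeholder storage account name:

```python
# Minimal sketch (illustrative only): create a read+list, time-limited
# user-delegation SAS URL for the egress container.
# Assumes the azure-identity and azure-storage-blob packages;
# the storage account name below is a placeholder.
from datetime import datetime, timedelta, timezone

from azure.identity import DefaultAzureCredential
from azure.storage.blob import (
    BlobServiceClient,
    ContainerSasPermissions,
    generate_container_sas,
)

account_name = "examplesensitivedata"  # placeholder: the account ending in 'sensitivedata'
account_url = f"https://{account_name}.blob.core.windows.net"
service = BlobServiceClient(account_url, credential=DefaultAzureCredential())

start = datetime.now(timezone.utc)
expiry = start + timedelta(hours=24)
delegation_key = service.get_user_delegation_key(start, expiry)

sas_token = generate_container_sas(
    account_name=account_name,
    container_name="egress",
    user_delegation_key=delegation_key,
    permission=ContainerSasPermissions(read=True, list=True),
    start=start,
    expiry=expiry,
)
print(f"{account_url}/egress?{sas_token}")  # the 'Blob SAS URL' to share securely
```

Note that the IP restriction comes from the storage account firewall rules checked below, not from the SAS token itself.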
+ +- In the Azure portal select **Subscriptions** then navigate to the subscription containing the relevant SHM +- Search for the resource group: `shm--sre--rg`, then click through to the storage account ending with `sensitivedata` +- Browse to **{menuselection}`Settings --> Networking`** and check the list of pre-approved IP addresses allowed under the **Firewall** header + - Ensure that the IP address of the person to receive the outputs is listed + - If it is not listed, modify and reupload the SRE configuration and redeploy the SRE using the `dsh` CLI, as per {ref}`deploy_sre` +- Browse to **{menuselection}`Data storage --> Containers`** +- Select the **egress** container +- Browse to **{menuselection}`Settings --> Shared access tokens`** and do the following: + - Under **Signing method**, select **User delegation key** + - Under **Permissions**, check these boxes: + - **Read** + - **List** + - Set a time window in the **Start and expiry date/time** that gives enough time for the person who will perform the secure egress download to do so + - Leave everything else as default and press **{guilabel}`Generate SAS token and URL`** + - Copy the **Blob SAS URL** + + ```{image} egress_token_read_only.png + :alt: Read-only SAS token + :align: center + ``` + +- Send the **Blob SAS URL** to the relevant person through a secure channel +- The appropriate person should now be able to download data + +## The output volume + +Once you have set up the egress connection in Azure Storage Explorer, you should be able to view data from the **output volume**, a read-write area intended for the extraction of results, such as figures for publication. +On the workspaces, this volume is `/mnt/output` and is shared between all workspaces in an SRE. +For more information on shared SRE storage volumes, consult the {ref}`Safe Haven User Guide `. diff --git a/docs/source/management/index.md b/docs/source/management/index.md index e9f49a5733..f8cd8ac0e0 100644 --- a/docs/source/management/index.md +++ b/docs/source/management/index.md @@ -1,215 +1,12 @@ # Management -## Managing users +:::{toctree} +:hidden: -### Add users to the Data Safe Haven - -:::{important} -You will need a full name, phone number, email address and country for each user. -::: - -1. You can add users directly in your Entra tenant, following the instructions [here](https://learn.microsoft.com/en-us/entra/fundamentals/how-to-create-delete-users). - -2. Alternatively, you can add multiple users from a CSV file with columns named (`GivenName`, `Surname`, `Phone`, `Email`, `CountryCode`). - - (Optional) you can provide a `Domain` column if you like but this will otherwise default to the domain of your SHM - - {{warning}} **Phone** must be in [E.123 international format](https://en.wikipedia.org/wiki/E.123) - - {{warning}} **CountryCode** is the two letter [ISO 3166-1 Alpha-2](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2#Officially_assigned_code_elements) code for the country where the user is based - -::::{admonition} Example CSV user file -:class: dropdown tip - -:::{code} text -GivenName;Surname;Phone;Email;CountryCode -Ada;Lovelace;+44800456456;ada@lovelace.me;GB -Grace;Hopper;+18005550100;grace@nasa.gov;US -::: -:::: - -```{code} shell -$ dsh users add PATH_TO_MY_CSV_FILE -``` - -### List available users - -- You can do this from the [Microsoft Entra admin centre](https://entra.microsoft.com/) - - 1. Browse to **{menuselection}`Groups --> All Groups`** - 2. Click on the group named **Data Safe Haven SRE _YOUR\_SRE\_NAME_ Users** - 3. 
Browse to **{menuselection}`Manage --> Members`** from the secondary menu on the left side - -- You can do this at the command line by running the following command: - - ```{code} shell - $ dsh users list YOUR_SRE_NAME - ``` - - which will give output like the following - - ``` - โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ณโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ณโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”“ - โ”ƒ username โ”ƒ Entra ID โ”ƒ SRE YOUR_SRE_NAME โ”ƒ - โ”กโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ•‡โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ•‡โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ฉ - โ”‚ ada.lovelace โ”‚ x โ”‚ x โ”‚ - โ”‚ grace.hopper โ”‚ x โ”‚ x โ”‚ - โ”‚ ursula.franklin โ”‚ x โ”‚ โ”‚ - โ”‚ joan.clarke โ”‚ x โ”‚ โ”‚ - โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ - ``` - -### Assign existing users to an SRE - -1. You can do this directly in your Entra tenant by adding them to the **Data Safe Haven SRE _YOUR\_SRE\_NAME_ Users** group, following the instructions [here](https://learn.microsoft.com/en-us/entra/fundamentals/groups-view-azure-portal#add-a-group-member). - -2. Alternatively, you can add multiple users from the command line: - - ```{code} shell - $ dsh users register YOUR_SRE_NAME -u USERNAME_1 -u USERNAME_2 - ``` - - where you must specify the usernames for each user you want to add to this SRE. - - :::{important} - Do not include the Entra ID domain part of the username, just the part before the @. - ::: - -### Manually register users for self-service password reset - -:::{tip} -Users created via the `dsh users` command line tool will be automatically registered for SSPR. 
+user.md +sre.md +data.md +logs.md ::: -If you have manually created a user and want to enable SSPR, do the following - -- Go to the [Microsoft Entra admin centre](https://entra.microsoft.com/) -- Browse to **{menuselection}`Users --> All Users`** -- Select the user you want to enable SSPR for -- On the **{menuselection}`Manage --> Authentication Methods`** page fill out their contact info as follows: - - Ensure that you register **both** a phone number and an email address - - **Phone:** add the user's phone number with a space between the country code and the rest of the number (_e.g._ +44 7700900000) - - **Email:** enter the user's email address here - - Click the **{guilabel}`Save`** icon in the top panel - -## Managing SREs - -### List available SRE configurations and deployment status - -- Run the following if you want to check what SRE configurations are available in the current context, and whether those SREs are deployed - -```{code} shell -$ dsh config available -``` - -which will give output like the following - -```{code} shell -Available SRE configurations for context 'green': -โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ณโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”“ -โ”ƒ SRE Name โ”ƒ Deployed โ”ƒ -โ”กโ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ•‡โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”โ”ฉ -โ”‚ emerald โ”‚ x โ”‚ -โ”‚ jade โ”‚ โ”‚ -โ”‚ olive โ”‚ โ”‚ -โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ดโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜ -``` - -### Remove a deployed Data Safe Haven - -- Run the following if you want to teardown a deployed SRE: - -```{code} shell -$ dsh sre teardown YOUR_SRE_NAME -``` - -::::{admonition} Tearing down an SRE is destructive and irreversible -:class: danger -Running `dsh sre teardown` will destroy **all** resources deployed within the SRE. -Ensure that any desired outputs have been extracted before deleting the SRE. -**All** data remaining on the SRE will be deleted. -The user groups for the SRE on Microsoft Entra ID will also be deleted. -:::: - -- Run the following if you want to teardown the deployed SHM: - -```{code} shell -$ dsh shm teardown -``` - -::::{admonition} Tearing down an SHM -:class: warning -Tearing down the SHM permanently deletes **all** remotely stored configuration and state data. -Tearing down the SHM also renders the SREs inaccessible to users and prevents them from being fully managed using the CLI. -All SREs associated with the SHM should be torn down before the SHM is torn down. -:::: - -## Managing data ingress and egress - -### Data Ingress - -It is the {ref}`role_data_provider_representative`'s responsibility to upload the data required by the safe haven. 
- -The following steps show how to generate a temporary, write-only upload token that can be securely sent to the {ref}`role_data_provider_representative`, enabling them to upload the data: - -- In the Azure portal select **Subscriptions** then navigate to the subscription containing the relevant SHM -- Search for the resource group: `shm--sre--rg`, then click through to the storage account ending with `sensitivedata` -- Browse to **{menuselection}`Settings --> Networking`** and ensure that the data provider's IP address is one of those allowed under the **Firewall** header - - If it is not listed, modify and reupload the SRE configuration and redeploy the SRE using the `dsh` CLI, as per {ref}`deploy_sre` -- Browse to **{menuselection}`Data storage --> Containers`** from the menu on the left hand side -- Click **ingress** -- Browse to **{menuselection}`Settings --> Shared access tokens`** and do the following: - - Under **Signing method**, select **User delegation key** - - Under **Permissions**, check these boxes: - - **Write** - - **List** - - Set a 24 hour time window in the **Start and expiry date/time** (or an appropriate length of time) - - Leave everything else as default and click **{guilabel}`Generate SAS token and URL`** - - Copy the **Blob SAS URL** - - ```{image} ingress_token_write_only.png - :alt: write-only SAS token - :align: center - ``` - -- Send the **Blob SAS URL** to the data provider through a secure channel -- The data provider should now be able to upload data -- Validate successful data ingress - - Browse to **{menuselection}`Data storage --> Containers`** (in the middle of the page) - - Select the **ingress** container and ensure that the uploaded files are present - -### Data egress - -```{important} -Assessment of output must be completed **before** an egress link is created. -Outputs are potentially sensitive, and so an appropriate process must be applied to ensure that they are suitable for egress. -``` - -The {ref}`role_system_manager` creates a time-limited and IP restricted link to remove data from the environment. 
- -- In the Azure portal select **Subscriptions** then navigate to the subscription containing the relevant SHM -- Search for the resource group: `shm--sre--rg`, then click through to the storage account ending with `sensitivedata` -- Browse to **{menuselection}`Settings --> Networking`** and check the list of pre-approved IP addresses allowed under the **Firewall** header - - Ensure that the IP address of the person to receive the outputs is listed - - If it is not listed, modify and reupload the SRE configuration and redeploy the SRE using the `dsh` CLI, as per {ref}`deploy_sre` -- Browse to **{menuselection}`Data storage --> Containers`** -- Select the **egress** container -- Browse to **{menuselection}`Settings --> Shared access tokens`** and do the following: - - Under **Signing method**, select **User delegation key** - - Under **Permissions**, check these boxes: - - **Read** - - **List** - - Set a time window in the **Start and expiry date/time** that gives enough time for the person who will perform the secure egress download to do so - - Leave everything else as default and press **{guilabel}`Generate SAS token and URL`** - - Copy the **Blob SAS URL** - - ```{image} egress_token_read_only.png - :alt: Read-only SAS token - :align: center - ``` - -- Send the **Blob SAS URL** to the relevant person through a secure channel -- The appropriate person should now be able to download data - -### The output volume - -Once you have set up the egress connection in Azure Storage Explorer, you should be able to view data from the **output volume**, a read-write area intended for the extraction of results, such as figures for publication. -On the workspaces, this volume is `/mnt/output` and is shared between all workspaces in an SRE. -For more information on shared SRE storage volumes, consult the {ref}`Safe Haven User Guide `. +Running a secure and productive Data Safe Haven requires a manager to conduct tasks which support users and to monitor the correct operation of the TRE. diff --git a/docs/source/management/logs.md b/docs/source/management/logs.md new file mode 100644 index 0000000000..10b9bfb0e5 --- /dev/null +++ b/docs/source/management/logs.md @@ -0,0 +1,128 @@ +# Monitoring logs + +Logs are collected for numerous parts of a Data Safe Haven. +Some of these logs are ingested into a central location, an Azure [Log Analytics Workspace](https://learn.microsoft.com/azure/azure-monitor/logs/log-analytics-workspace-overview), and others are stored separately. + +## Log workspace + +Each SRE has its own Log Analytics Workspace. +You can view the workspaces by going to the Azure portal and navigating to [Log Analytics Workspaces](https://portal.azure.com/#browse/Microsoft.OperationalInsights%2Fworkspaces). +Select which Log Analytics Workspace you want to view by clicking on the workspace named `shm--sre--log`. + +The logs can be filtered using [Kusto Query Language (KQL)](https://learn.microsoft.com/en-us/azure/azure-monitor/logs/log-query-overview). + +## Storage logs + +Depending on how different parts of Data Safe Haven storage are provisioned, logs may differ. + +### Sensitive data logs + +The sensitive data containers are the [ingress and egress containers](./data.md). +Logs from these containers are ingested into the [SRE's log analytics workspace](#log-workspace). +There are two tables, + +`StorageBlobLogs` +: Events occurring on the blob containers. +: For example data being uploaded, extracted or read. 
+ +`AzureMetrics` +: Various metrics on blob container utilisation and performance. +: This table is not reserved for the sensitive data containers and other resources may log to it. + +### Desired state data logs + +The desired state container holds the data necessary to configure virtual machines in an SRE. +Logs from the desired state container are ingested into the [SRE's log analytics workspace](#log-workspace). +There are two tables, + +`StorageBlobLogs` +: Events occurring on the blob containers. +: For example data being uploaded, extracted or read. + +`AzureMetrics` +: Various metrics on blob container utilisation and performance. +: This table is not reserved for the desired state data container and other resources may log to it. + +### User data logs + +The user data file share holds the {ref}`researchers'` [home directories](https://refspecs.linuxfoundation.org/FHS_3.0/fhs/ch03s08.html), where they will store their personal data and configuration. +Logs from the share are ingested into the [SRE's log analytics workspace](#log-workspace). +There are two tables, + +`StorageFileLogs` +: NFS events occurring on the file share. +: For example data being written or directories being accessed + +`AzureMetrics` +: Various metrics on file share utilisation and performance. +: This table is not reserved for the user data share and other resources may log to it. + +### Configuration data logs + +There are multiple configuration data file shares. +Each contains the configuration and state data for the Data Safe Haven [services deployed as containers](#container-logs). +Logs from the share are ingested into the [SRE's log analytics workspace](#log-workspace). +There are two tables, + +`StorageFileLogs` +: SMB events occurring on the file share. +: For example data being written or directories being accessed + +`AzureMetrics` +: Various metrics on file share utilisation and performance. +: This table is not reserved for the configuration data shares and other resources may log to it. + +## Container logs + +Some of the Data Safe Haven infrastructure is provisioned as containers. +These include, + +- remote desktop portal +- package proxy +- Gitea and Hedgedoc + +Logs from all containers are ingested into the [SRE's log analytics workspace](#log-workspace). +There are two tables, + +`ContainerEvents_CL` +: Event logs for the container instance resources such as starting, stopping, crashes and pulling images. + +`ContainerInstanceLog_CL` +: Container process logs. +: This is where you can view the output of the containerised applications and will be useful for debugging problems. + +## Workspace logs + +Logs from all user workspaces are ingested into the [SRE's log analytics workspace](#log-workspace) using the [Azure Monitor Agent](https://learn.microsoft.com/en-us/azure/azure-monitor/agents/azure-monitor-agent-overview). + +There are three tables, + +`Perf` +: Usage statistics for individual workspaces, such as percent memory used and percent disk space used. + +`Syslog` +: [syslog](https://www.paessler.com/it-explained/syslog) events from workspaces. +: Syslog is the _de facto_ standard protocol for logging on Linux and most applications will log to it. +: These logs will be useful for debugging problems with the workspace or workspace software. + +`Heartbeat` +: Verification that the Azure Monitor Agent is present on the workspaces and is able to connect to the [log analytics workspace](#log-workspace). 
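As a rough illustration only — not part of this changeset — these tables can also be queried programmatically with KQL; the minimal sketch below assumes the `azure-identity` and `azure-monitor-query` packages and a placeholder workspace GUID, and summarises the `Perf` table per workspace over the last day:

```python
# Minimal sketch (illustrative only): run a KQL query against the SRE's
# Log Analytics workspace from Python.
# Assumes the azure-identity and azure-monitor-query packages;
# the workspace ID below is a placeholder GUID.
from datetime import timedelta

from azure.identity import DefaultAzureCredential
from azure.monitor.query import LogsQueryClient

client = LogsQueryClient(DefaultAzureCredential())
workspace_id = "00000000-0000-0000-0000-000000000000"  # placeholder workspace GUID

# Average performance counter values per workspace over the last day
query = """
Perf
| where TimeGenerated > ago(1d)
| summarize avg(CounterValue) by Computer, CounterName
"""

response = client.query_workspace(workspace_id, query, timespan=timedelta(days=1))
for table in response.tables:
    for row in table.rows:
        print(dict(zip(table.columns, row)))
```

The same pattern applies to the other tables described on this page (for example `Syslog`, `StorageFileLogs` or `ContainerInstanceLog_CL`), substituting the appropriate KQL query.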
+
+## Firewall logs
+
+The firewall plays a critical role in the security of a Data Safe Haven.
+It filters all outbound traffic through a set of FQDN rules so that each component may only reach necessary and allowed domains.
+
+Logs from the firewall are ingested into the [SRE's log workspace](#log-workspace).
+There are three tables,
+
+`AZFWApplicationRule`
+: Logs from the firewall's FQDN filters.
+: Shows requests to the outside of the Data Safe Haven and why they have been approved or rejected.
+
+`AZFWDnsQuery`
+: DNS requests handled by the firewall.
+
+`AzureMetrics`
+: Various metrics on firewall utilisation and performance.
+: This table is not reserved for the firewall and other resources may log to it.
diff --git a/docs/source/management/sre.md b/docs/source/management/sre.md
new file mode 100644
index 0000000000..fddaa5e135
--- /dev/null
+++ b/docs/source/management/sre.md
@@ -0,0 +1,88 @@
+# Managing SREs
+
+## List available SRE configurations and deployment status
+
+- Use {typer}`dsh config available` to check what SRE configurations are available in the current context, and whether those SREs are deployed.
+
+```{code} shell
+$ dsh config available
+```
+
+which will give output like the following
+
+```{code} shell
+Available SRE configurations for context 'green':
+┏━━━━━━━━━━━━━━━┳━━━━━━━━━━┓
+┃ SRE Name      ┃ Deployed ┃
+┡━━━━━━━━━━━━━━━╇━━━━━━━━━━┩
+│ emerald       │ x        │
+│ jade          │          │
+│ olive         │          │
+└───────────────┴──────────┘
+```
+
+## Remove a deployed Data Safe Haven
+
+- Use {typer}`dsh sre teardown` to tear down a deployed SRE:
+
+```{code} shell
+$ dsh sre teardown YOUR_SRE_NAME
+```
+
+::::{admonition} Tearing down an SRE is destructive and irreversible
+:class: danger
+Running `dsh sre teardown` will destroy **all** resources deployed within the SRE.
+Ensure that any desired outputs have been extracted before deleting the SRE.
+**All** data remaining on the SRE will be deleted.
+The user groups for the SRE on Microsoft Entra ID will also be deleted.
+::::
+
+- Use {typer}`dsh shm teardown` if you want to tear down the deployed SHM:
+
+```{code} shell
+$ dsh shm teardown
+```
+
+::::{admonition} Tearing down an SHM
+:class: warning
+Tearing down the SHM permanently deletes **all** remotely stored configuration and state data.
+Tearing down the SHM also renders the SREs inaccessible to users and prevents them from being fully managed using the CLI.
+All SREs associated with the SHM should be torn down before the SHM is torn down.
+::::
+
+## Updating SREs
+
+SREs are modified by updating the configuration and then running the deploy command.
+
+- The existing configuration for the SRE can be shown using {typer}`dsh config show`:
+
+```{code} shell
+$ dsh config show YOUR_SRE_NAME
+```
+
+- If you do not have a local copy, you can write one with the `--file` option:
+
+```{code} shell
+$ dsh config show YOUR_SRE_NAME --file YOUR_SRE_NAME.yaml
+```
+
+- Edit the configuration file locally, and upload the new version using {typer}`dsh config upload`:
+
+```{code} shell
+$ dsh config upload YOUR_SRE_NAME.yaml
+```
+
+- You will be shown the differences between the existing configuration and the new configuration and asked to confirm that they are correct.
+
+- Finally, deploy your SRE using {typer}`dsh sre deploy` to apply any changes:
+
+```{code} shell
+$ dsh sre deploy YOUR_SRE_NAME
+```
+
+::::{admonition} Changing administrator IP addresses
+:class: warning
+The administrator IP addresses declared in the SRE configuration are used to create access rules for SRE infrastructure.
+Therefore, after an SRE has been deployed, some changes can only be made from IP addresses on that list.
+
+As a consequence, if you want to update the list of administrator IP addresses, for example to add a new administrator, you must do so from an IP address that is already allowed.
+::::
diff --git a/docs/source/management/user.md b/docs/source/management/user.md
new file mode 100644
index 0000000000..337af2302f
--- /dev/null
+++ b/docs/source/management/user.md
@@ -0,0 +1,84 @@
+# Managing users
+
+## Add users to the Data Safe Haven
+
+:::{important}
+You will need a full name, phone number, email address and country for each user.
+:::
+
+1. You can add users directly in your Entra tenant, following the instructions [here](https://learn.microsoft.com/en-us/entra/fundamentals/how-to-create-delete-users).
+1. Alternatively, you can use {typer}`dsh users add` to add multiple users from a CSV file with columns named (`GivenName`, `Surname`, `Phone`, `Email`, `CountryCode`).
+    - (Optional) you can provide a `Domain` column; if you do not, the domain will default to the domain of your SHM
+    - {{warning}} **Phone** must be in [E.123 international format](https://en.wikipedia.org/wiki/E.123)
+    - {{warning}} **CountryCode** is the two-letter [ISO 3166-1 Alpha-2](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2#Officially_assigned_code_elements) code for the country where the user is based
+
+::::{admonition} Example CSV user file
+:class: dropdown tip
+
+:::{code} text
+GivenName;Surname;Phone;Email;CountryCode
+Ada;Lovelace;+44800456456;ada@lovelace.me;GB
+Grace;Hopper;+18005550100;grace@nasa.gov;US
+:::
+::::
+
+```{code} shell
+$ dsh users add PATH_TO_MY_CSV_FILE
+```
+
+## List available users
+
+- You can do this from the [Microsoft Entra admin centre](https://entra.microsoft.com/)
+    1. Browse to **{menuselection}`Groups --> All Groups`**
+    1. Click on the group named **Data Safe Haven SRE _YOUR\_SRE\_NAME_ Users**
+    1. Browse to **{menuselection}`Manage --> Members`** from the secondary menu on the left side
+- You can do this at the command line by running `dsh users list`:
+
+  ```{code} shell
+  $ dsh users list YOUR_SRE_NAME
+  ```
+
+  which will give output like the following
+
+  ```
+  ┏━━━━━━━━━━━━━━━━━┳━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━┓
+  ┃ username        ┃ Entra ID ┃ SRE YOUR_SRE_NAME ┃
+  ┡━━━━━━━━━━━━━━━━━╇━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━┩
+  │ ada.lovelace    │ x        │ x                 │
+  │ grace.hopper    │ x        │ x                 │
+  │ ursula.franklin │ x        │                   │
+  │ joan.clarke     │ x        │                   │
+  └─────────────────┴──────────┴───────────────────┘
+  ```
+
+## Assign existing users to an SRE
+
+1.
You can do this directly in your Entra tenant by adding them to the **Data Safe Haven SRE _YOUR\_SRE\_NAME_ Users** group, following the instructions [here](https://learn.microsoft.com/en-us/entra/fundamentals/groups-view-azure-portal#add-a-group-member). +1. Alternatively, you can add multiple users from the command line using {typer}`dsh users register`: + + ```{code} shell + $ dsh users register YOUR_SRE_NAME -u USERNAME_1 -u USERNAME_2 + ``` + + where you must specify the usernames for each user you want to add to this SRE. + + :::{important} + Do not include the Entra ID domain part of the username, just the part before the @. + ::: + +## Manually register users for self-service password reset + +:::{tip} +Users created via the {typer}`dsh users` command line tool will be automatically registered for SSPR. +::: + +If you have manually created a user and want to enable SSPR, do the following + +- Go to the [Microsoft Entra admin centre](https://entra.microsoft.com/) +- Browse to **{menuselection}`Users --> All Users`** +- Select the user you want to enable SSPR for +- On the **{menuselection}`Manage --> Authentication Methods`** page fill out their contact info as follows: + - Ensure that you register **both** a phone number and an email address + - **Phone:** add the user's phone number with a space between the country code and the rest of the number (_e.g._ +44 7700900000) + - **Email:** enter the user's email address here + - Click the **{guilabel}`Save`** icon in the top panel diff --git a/docs/source/overview/sensitivity_tiers.md b/docs/source/overview/sensitivity_tiers.md index 4aef9a32fe..995be6ab87 100644 --- a/docs/source/overview/sensitivity_tiers.md +++ b/docs/source/overview/sensitivity_tiers.md @@ -49,7 +49,7 @@ Non-technical restrictions related to information governance procedures may also - connections to the in-browser remote desktop can only be made from an agreed set of IP addresses - outbound connections to the internet from inside the environment are not possible - copy-and-paste between the environment and the user's device is not possible -- access to all packages on PyPI and CRAN is made available through a proxy or mirror server +- access to all packages on PyPI and CRAN is made available through a proxy server Non-technical restrictions related to information governance procedures may also be applied according to your organisation's needs. @@ -63,7 +63,7 @@ At the Turing connections to Tier 2 environments are only permitted from **Organ **Tier 3** environments impose the following technical controls on top of what is required at {ref}`policy_tier_2`. -- a partial replica of agreed PyPI and CRAN packages is made available through a proxy or mirror server +- an agreed subset of PyPI and CRAN packages is made available through a proxy server Non-technical restrictions related to information governance procedures may also be applied according to your organisation's needs. diff --git a/docs/source/reference/config.md b/docs/source/reference/config.md new file mode 100644 index 0000000000..daffc4e2e8 --- /dev/null +++ b/docs/source/reference/config.md @@ -0,0 +1,10 @@ +# `config` + +`dsh config` commands are used to manage the configuration files that define SHMs and SREs. 
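+
+For example, a typical editing cycle (the SRE name here is illustrative) is to write the current configuration to a file, edit it, and upload the result:
+
+```{code} shell
+$ dsh config show emerald --file emerald.yaml
+$ dsh config upload emerald.yaml
+```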
+
+:::{typer} data_safe_haven.commands.config:config_command_group
+:width: 65
+:prog: dsh config
+:show-nested:
+:make-sections:
+:::
diff --git a/docs/source/reference/context.md b/docs/source/reference/context.md
new file mode 100644
index 0000000000..76b72a4ca0
--- /dev/null
+++ b/docs/source/reference/context.md
@@ -0,0 +1,10 @@
+# `context`
+
+`dsh context` commands are used to manage the Data Safe Haven contexts, which are the groupings within which a single SHM and its associated SREs are organised.
+
+:::{typer} data_safe_haven.commands.context:context_command_group
+:width: 65
+:prog: dsh context
+:show-nested:
+:make-sections:
+:::
diff --git a/docs/source/reference/index.md b/docs/source/reference/index.md
new file mode 100644
index 0000000000..6bcb99fd2e
--- /dev/null
+++ b/docs/source/reference/index.md
@@ -0,0 +1,44 @@
+# CLI Reference
+
+:::{toctree}
+:hidden:
+
+config.md
+context.md
+users.md
+pulumi.md
+shm.md
+sre.md
+:::
+
+A Data Safe Haven is managed using the `dsh` command line interface.
+A full guide to the commands available for managing your Data Safe Haven is provided here.
+
+The `dsh` commands are the entrypoint to the Data Safe Haven command line interface.
+All commands begin with `dsh`.
+
+:::{typer} data_safe_haven.commands.cli:application
+:prog: dsh
+:width: 65
+:::
+
+The subcommands can be used to manage various aspects of a Data Safe Haven deployment.
+For further detail on each subcommand, navigate to the relevant page.
+
+[Config](config.md)
+: Management of the configuration files used to define SHMs and SREs
+
+[Context](context.md)
+: Management of DSH contexts, the groupings that encompass an SHM and its associated SREs
+
+[Users](users.md)
+: Management of users in Entra ID
+
+[Pulumi](pulumi.md)
+: An interface to the Pulumi command line interface
+
+[shm](shm.md)
+: Management of infrastructure for DSH Safe Haven Management environments
+
+[sre](sre.md)
+: Management of infrastructure for DSH Secure Research Environments
diff --git a/docs/source/reference/pulumi.md b/docs/source/reference/pulumi.md
new file mode 100644
index 0000000000..e6cb051860
--- /dev/null
+++ b/docs/source/reference/pulumi.md
@@ -0,0 +1,10 @@
+# `pulumi`
+
+`dsh pulumi` commands allow you to run native Pulumi command line operations to interact with an SRE stack.
+
+:::{typer} data_safe_haven.commands.pulumi:pulumi_command_group
+:width: 65
+:prog: dsh pulumi
+:show-nested:
+:make-sections:
+:::
diff --git a/docs/source/reference/shm.md b/docs/source/reference/shm.md
new file mode 100644
index 0000000000..0bb9f09105
--- /dev/null
+++ b/docs/source/reference/shm.md
@@ -0,0 +1,10 @@
+# `shm`
+
+`dsh shm` commands are used to deploy or tear down DSH Safe Haven Management infrastructure.
+
+:::{typer} data_safe_haven.commands.shm:shm_command_group
+:width: 65
+:prog: dsh shm
+:show-nested:
+:make-sections:
+:::
diff --git a/docs/source/reference/sre.md b/docs/source/reference/sre.md
new file mode 100644
index 0000000000..03e478efe3
--- /dev/null
+++ b/docs/source/reference/sre.md
@@ -0,0 +1,10 @@
+# `sre`
+
+`dsh sre` commands are used to deploy or tear down the infrastructure for DSH Secure Research Environments.
+
+:::{typer} data_safe_haven.commands.sre:sre_command_group
+:width: 65
+:prog: dsh sre
+:show-nested:
+:make-sections:
+:::
diff --git a/docs/source/reference/users.md b/docs/source/reference/users.md
new file mode 100644
index 0000000000..9b198ce5ba
--- /dev/null
+++ b/docs/source/reference/users.md
@@ -0,0 +1,10 @@
+# `users`
+
+`dsh users` commands are used to manage
users on the Entra ID associated with a DSH deployment. + +:::{typer} data_safe_haven.commands.users:users_command_group +:width: 65 +:prog: dsh users +:show-nested: +:make-sections: +::: diff --git a/pyproject.toml b/pyproject.toml index e3cb46525e..529ed565e9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -38,24 +38,24 @@ dependencies = [ "azure-mgmt-rdbms==10.1.0", "azure-mgmt-resource==23.2.0", "azure-mgmt-storage==21.2.1", - "azure-storage-blob==12.23.1", - "azure-storage-file-datalake==12.17.0", - "azure-storage-file-share==12.19.0", + "azure-storage-blob==12.24.0", + "azure-storage-file-datalake==12.18.0", + "azure-storage-file-share==12.20.0", "chevron==0.14.0", - "cryptography==43.0.3", + "cryptography==44.0.0", "fqdn==1.5.1", "psycopg[binary]==3.1.19", # needed for installation on older MacOS versions - "pulumi-azure-native==2.71.0", + "pulumi-azure-native==2.76.0", "pulumi-azuread==6.0.1", "pulumi-random==4.16.7", - "pulumi==3.138.0", - "pydantic==2.9.2", - "pyjwt[crypto]==2.9.0", + "pulumi==3.142.0", + "pydantic==2.10.3", + "pyjwt[crypto]==2.10.1", "pytz==2024.2", "pyyaml==6.0.2", "rich==13.9.4", "simple-acme-dns==3.2.0", - "typer==0.13.0", + "typer==0.15.1", "websocket-client==1.8.0", ] @@ -71,15 +71,16 @@ docs = [ "pydata-sphinx-theme==0.16.0", "sphinx-togglebutton==0.3.2", "sphinx==8.1.3", + "sphinxcontrib-typer==0.5.1", ] lint = [ - "ansible-dev-tools==24.10.2", - "ansible==10.6.0", + "ansible-dev-tools==24.11.0", + "ansible==11.1.0", "black==24.10.0", "mypy==1.13.0", - "pandas-stubs==2.2.3.241009", - "pydantic==2.9.2", - "ruff==0.7.3", + "pandas-stubs==2.2.3.241126", + "pydantic==2.10.3", + "ruff==0.8.2", "types-appdirs==1.4.3.5", "types-chevron==0.14.2.20240310", "types-pytz==2024.2.0.20241003", @@ -87,10 +88,10 @@ lint = [ "types-requests==2.32.0.20241016", ] test = [ - "coverage==7.6.4", + "coverage==7.6.9", "freezegun==1.5.1", "pytest-mock==3.14.0", - "pytest==8.3.3", + "pytest==8.3.4", "requests-mock==1.12.1", ] @@ -120,11 +121,11 @@ lock-filename = ".hatch/requirements.txt" [tool.hatch.envs.docs] type = "pip-compile" lock-filename = ".hatch/requirements-docs.txt" -detached = true features = ["docs"] [tool.hatch.envs.docs.scripts] build = "sphinx-build -M html docs/source/ docs/build/ --fail-on-warning" +clean = "rm -r docs/build" lint = "mdl --style .mdlstyle.rb ./docs/source" [tool.hatch.envs.lint] @@ -158,7 +159,7 @@ pip-compile-constraint = "default" features = ["test"] [tool.hatch.envs.test.scripts] -test = "coverage run -m pytest {args: tests}" +test = "coverage run -m pytest {args:} ./tests" test-report = "coverage report {args:}" test-coverage = ["test", "test-report"] diff --git a/tests/commands/conftest.py b/tests/commands/conftest.py index d675398bfc..de60eb29d0 100644 --- a/tests/commands/conftest.py +++ b/tests/commands/conftest.py @@ -1,6 +1,8 @@ from pytest import fixture from typer.testing import CliRunner +from data_safe_haven.administration.users.entra_users import EntraUsers +from data_safe_haven.administration.users.research_user import ResearchUser from data_safe_haven.config import ( Context, ContextManager, @@ -260,3 +262,14 @@ def tmp_contexts_none(tmp_path, context_yaml): with open(config_file_path, "w") as f: f.write(context_yaml) return tmp_path + + +@fixture +def mock_entra_user_list(mocker): + test_user = ResearchUser( + given_name="Harry", + surname="Lime", + sam_account_name="harry.lime", + user_principal_name="harry.lime@acme.testing", + ) + mocker.patch.object(EntraUsers, "list", return_value=[test_user]) diff --git 
a/tests/commands/test_sre.py b/tests/commands/test_sre.py index a13518a878..9d2f79d07c 100644 --- a/tests/commands/test_sre.py +++ b/tests/commands/test_sre.py @@ -5,7 +5,7 @@ from data_safe_haven.commands.sre import sre_command_group from data_safe_haven.config import Context, ContextManager from data_safe_haven.exceptions import DataSafeHavenAzureError -from data_safe_haven.external import AzureSdk +from data_safe_haven.external import AzureSdk, GraphApi class TestDeploySRE: @@ -31,13 +31,17 @@ def test_no_application( self, caplog: LogCaptureFixture, runner: CliRunner, + mocker, mock_azuresdk_get_subscription_name, # noqa: ARG002 mock_contextmanager_assert_context, # noqa: ARG002 mock_ip_1_2_3_4, # noqa: ARG002 mock_pulumi_config_from_remote_or_create, # noqa: ARG002 mock_shm_config_from_remote, # noqa: ARG002 mock_sre_config_from_remote, # noqa: ARG002 + mock_graphapi_get_credential, # noqa: ARG002 ) -> None: + mocker.patch.object(GraphApi, "get_application_by_name", return_value=None) + result = runner.invoke(sre_command_group, ["deploy", "sandbox"]) assert result.exit_code == 1 assert ( diff --git a/tests/commands/test_users.py b/tests/commands/test_users.py index c1b183c922..5c11e29cc9 100644 --- a/tests/commands/test_users.py +++ b/tests/commands/test_users.py @@ -52,6 +52,26 @@ def test_invalid_shm( assert result.exit_code == 1 assert "Have you deployed the SHM?" in result.stdout + def test_mismatched_domain( + self, + mock_graphapi_get_credential, # noqa: ARG002 + mock_pulumi_config_no_key_from_remote, # noqa: ARG002 + mock_shm_config_from_remote, # noqa: ARG002 + mock_sre_config_from_remote, # noqa: ARG002 + mock_entra_user_list, # noqa: ARG002 + runner, + tmp_contexts, # noqa: ARG002 + ): + result = runner.invoke( + users_command_group, ["register", "-u", "harry.lime", "sandbox"] + ) + + assert result.exit_code == 0 + assert ( + "principal domain name must match the domain of the SRE to be registered" + in result.stdout + ) + def test_invalid_sre( self, mock_pulumi_config_from_remote, # noqa: ARG002 diff --git a/tests/config/test_config_sections.py b/tests/config/test_config_sections.py index 7d9a0ba873..6528b130fa 100644 --- a/tests/config/test_config_sections.py +++ b/tests/config/test_config_sections.py @@ -170,24 +170,6 @@ def test_all_databases_must_be_unique(self) -> None: databases=[DatabaseSystem.POSTGRESQL, DatabaseSystem.POSTGRESQL], ) - def test_data_provider_tag_internet( - self, - config_subsection_remote_desktop: ConfigSubsectionRemoteDesktopOpts, - config_subsection_storage_quota_gb: ConfigSubsectionStorageQuotaGB, - ): - sre_config = ConfigSectionSRE( - admin_email_address="admin@example.com", - remote_desktop=config_subsection_remote_desktop, - storage_quota_gb=config_subsection_storage_quota_gb, - data_provider_ip_addresses="Internet", - ) - assert isinstance(sre_config.data_provider_ip_addresses, AzureServiceTag) - assert sre_config.data_provider_ip_addresses == "Internet" - - def test_data_provider_tag_invalid(self): - with pytest.raises(ValueError, match="Input should be 'Internet'"): - ConfigSectionSRE(data_provider_ip_addresses="Not a tag") - def test_ip_overlap_admin(self): with pytest.raises(ValueError, match="IP addresses must not overlap."): ConfigSectionSRE( diff --git a/tests/conftest.py b/tests/conftest.py index 5a8ce42847..8734d39ba1 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -29,7 +29,10 @@ ) from data_safe_haven.exceptions import DataSafeHavenAzureError from data_safe_haven.external import AzureSdk, PulumiAccount -from 
data_safe_haven.external.api.credentials import AzureSdkCredential +from data_safe_haven.external.api.credentials import ( + AzureSdkCredential, + GraphApiCredential, +) from data_safe_haven.infrastructure import SREProjectManager from data_safe_haven.infrastructure.project_manager import ProjectManager from data_safe_haven.logging import init_logging @@ -215,6 +218,19 @@ def mock_azuresdk_get_subscription_name(mocker): ) +@fixture +def mock_graphapi_get_credential(mocker): + class MockCredential(TokenCredential): + def get_token(*args, **kwargs): # noqa: ARG002 + return AccessToken("dummy-token", 0) + + mocker.patch.object( + GraphApiCredential, + "get_credential", + return_value=MockCredential(), + ) + + @fixture def mock_azuresdk_get_credential(mocker): class MockCredential(TokenCredential): diff --git a/tests/external/api/test_credentials.py b/tests/external/api/test_credentials.py index c0e631e912..e57bdb324b 100644 --- a/tests/external/api/test_credentials.py +++ b/tests/external/api/test_credentials.py @@ -5,7 +5,10 @@ ) from data_safe_haven.directories import config_dir -from data_safe_haven.exceptions import DataSafeHavenAzureError +from data_safe_haven.exceptions import ( + DataSafeHavenAzureError, + DataSafeHavenCachedCredentialError, +) from data_safe_haven.external.api.credentials import ( AzureSdkCredential, DeferredCredential, @@ -13,7 +16,7 @@ ) -class TestDeferredCredential: +class TestAzureSdkCredential: def test_confirm_credentials_interactive( self, mock_confirm_yes, # noqa: ARG002 @@ -33,14 +36,17 @@ def test_confirm_credentials_interactive_fail( self, mock_confirm_no, # noqa: ARG002 mock_azureclicredential_get_token, # noqa: ARG002 + capsys, ): DeferredCredential.cache_ = set() credential = AzureSdkCredential(skip_confirmation=False) with pytest.raises( - DataSafeHavenAzureError, - match="Error getting account information from Azure CLI.", + DataSafeHavenCachedCredentialError, + match="Selected credentials are incorrect.", ): credential.get_credential() + out, _ = capsys.readouterr() + assert "Please authenticate with Azure: run 'az login'" in out def test_confirm_credentials_interactive_cache( self, @@ -67,8 +73,6 @@ def test_decode_token_error( ): credential.decode_token(credential.token) - -class TestAzureSdkCredential: def test_get_credential(self, mock_azureclicredential_get_token): # noqa: ARG002 credential = AzureSdkCredential(skip_confirmation=True) assert isinstance(credential.get_credential(), AzureCliCredential)