From 3a4fa4c53a825b2a0284bb254f0a2f90d2adb2dd Mon Sep 17 00:00:00 2001
From: Mark Goddard
Date: Fri, 22 Sep 2023 08:40:36 +0000
Subject: [PATCH 01/14] Add a custom playbook to fix OVN chassis priorities

Sometimes, typically after restarting OVN services, the priorities of entries
in the ha_chassis and gateway_chassis tables in the OVN northbound database
can become misaligned. This results in broken routing for external (bare
metal/SR-IOV) ports.

This playbook can be used to fix the issue by realigning the priorities of
the table entries. It does so by assigning the highest priority to the
"first" (sorted alphabetically) OVN NB DB host. This results in all gateways
being scheduled to a single host, but is less complicated than trying to
balance them (and it's also not clear to me how to map between individual
ha_chassis and gateway_chassis entries).

The playbook can be run as follows:

kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/ovn-fix-chassis-priorities.yml

If the 'controllers' group does not align with the group used to deploy the
OVN NB DB, this can be overridden by passing the following:
'-e ovn_nb_db_group=some_other_group'
---
 .../ansible/ovn-fix-chassis-priorities.yml | 69 +++++++++++++++++++
 1 file changed, 69 insertions(+)
 create mode 100644 etc/kayobe/ansible/ovn-fix-chassis-priorities.yml

diff --git a/etc/kayobe/ansible/ovn-fix-chassis-priorities.yml b/etc/kayobe/ansible/ovn-fix-chassis-priorities.yml
new file mode 100644
index 000000000..20542df88
--- /dev/null
+++ b/etc/kayobe/ansible/ovn-fix-chassis-priorities.yml
@@ -0,0 +1,69 @@
+---
+# Sometimes, typically after restarting OVN services, the priorities of entries
+# in the ha_chassis and gateway_chassis tables in the OVN northbound database
+# can become misaligned. This results in broken routing for external (bare
+# metal/SR-IOV) ports.
+
+# This playbook can be used to fix the issue by realigning the priorities of
+# the table entries. It does so by assigning the highest priority to the
+# "first" (sorted alphabetically) OVN NB DB host. This results in all gateways
+# being scheduled to a single host, but is less complicated than trying to
+# balance them (and it's also not clear to me how to map between individual
+# ha_chassis and gateway_chassis entries).
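+
+# For reference (an illustrative addition, not part of the original change),
+# the current priorities can be inspected manually on the OVN NB DB leader,
+# assuming the same ovn_nb_db container used below:
+#   docker exec ovn_nb_db ovn-nbctl --columns=chassis_name,priority list ha_chassis
+#   docker exec ovn_nb_db ovn-nbctl --columns=chassis_name,priority list gateway_chassis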
+
+# The playbook can be run as follows:
+# kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/ovn-fix-chassis-priorities.yml
+
+# If the 'controllers' group does not align with the group used to deploy the
+# OVN NB DB, this can be overridden by passing the following:
+# '-e ovn_nb_db_group=some_other_group'
+
+- name: Find OVN NB DB leader
+  hosts: "{{ ovn_nb_db_group | default('controllers') }}"
+  tasks:
+    - name: Find the OVN NB DB leader
+      command: docker exec -it ovn_nb_db ovn-nbctl get-connection
+      changed_when: false
+      failed_when: false
+      register: ovn_check_result
+      check_mode: no
+
+    - name: Group hosts by leader/follower role
+      group_by:
+        key: "ovn_nb_{{ 'leader' if ovn_check_result.rc == 0 else 'follower' }}"
+      changed_when: false
+
+    - name: Assert one leader exists
+      assert:
+        that:
+          - groups['ovn_nb_leader'] | default([]) | length == 1
+
+- name: Fix OVN chassis priorities
+  hosts: ovn_nb_leader
+  vars:
+    ovn_nb_db_group: controllers
+    ovn_nb_db_hosts_sorted: "{{ query('inventory_hostnames', ovn_nb_db_group) | sort | list }}"
+    ha_chassis_max_priority: 32767
+    gateway_chassis_max_priority: "{{ ovn_nb_db_hosts_sorted | length }}"
+  tasks:
+    - name: Fix ha_chassis priorities
+      command: >-
+        docker exec -it ovn_nb_db
+        bash -c '
+        ovn-nbctl find ha_chassis chassis_name={{ item }} |
+        awk '\''$1 == "_uuid" { print $3 }'\'' |
+        while read uuid; do ovn-nbctl set ha_chassis $uuid priority={{ priority }}; done'
+      loop: "{{ ovn_nb_db_hosts_sorted }}"
+      vars:
+        priority: "{{ ha_chassis_max_priority | int - ovn_nb_db_hosts_sorted.index(item) }}"
+
+    - name: Fix gateway_chassis priorities
+      command: >-
+        docker exec -it ovn_nb_db
+        bash -c '
+        ovn-nbctl find gateway_chassis chassis_name={{ item }} |
+        awk '\''$1 == "_uuid" { print $3 }'\'' |
+        while read uuid; do ovn-nbctl set gateway_chassis $uuid priority={{ priority }}; done'
+      loop: "{{ ovn_nb_db_hosts_sorted }}"
+      vars:
+        priority: "{{ gateway_chassis_max_priority | int - ovn_nb_db_hosts_sorted.index(item) }}"

From e57542ccf2b1d868a35964928211f90078103f54 Mon Sep 17 00:00:00 2001
From: Pierre Riteau
Date: Thu, 28 Sep 2023 21:02:24 +0200
Subject: [PATCH 02/14] Document stackhpc_pulp_images_kolla_filter variable

---
 doc/source/configuration/release-train.rst | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/doc/source/configuration/release-train.rst b/doc/source/configuration/release-train.rst
index d4757f54f..1b0a48a00 100644
--- a/doc/source/configuration/release-train.rst
+++ b/doc/source/configuration/release-train.rst
@@ -170,6 +170,16 @@ promoted to production:
    kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/pulp-repo-promote-production.yml
+Synchronising all Kolla container images can take a long time. A limited list
+of images can be synchronised using the ``stackhpc_pulp_images_kolla_filter``
+variable, which accepts a whitespace-separated list of regular expressions
+matching Kolla image names. Usage is similar to ``kolla-build`` CLI arguments.
+For example:
+
+.. 
code-block:: console + + kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/pulp-container-sync.yml -e stackhpc_pulp_images_kolla_filter='"^glance nova-compute$"' + Initial seed deployment ----------------------- From ed2aadd15d06127179818132681255ec296371ee Mon Sep 17 00:00:00 2001 From: Will Szumski Date: Fri, 29 Sep 2023 09:00:32 +0100 Subject: [PATCH 03/14] Use upper constraints to install openstackclient ``` Traceback (most recent call last): File "/home/cloud-user/actions-runner/_work/stackhpc-kayobe-config/stackhpc-kayobe-config/venvs/kayobe/bin/openstack", line 5, in from openstackclient.shell import main File "/home/cloud-user/actions-runner/_work/stackhpc-kayobe-config/stackhpc-kayobe-config/venvs/kayobe/lib64/python3.6/site-packages/openstackclient/shell.py", line 23, in from osc_lib import shell File "/home/cloud-user/actions-runner/_work/stackhpc-kayobe-config/stackhpc-kayobe-config/venvs/kayobe/lib64/python3.6/site-packages/osc_lib/shell.py", line 32, in from osc_lib.cli import client_config as cloud_config File "/home/cloud-user/actions-runner/_work/stackhpc-kayobe-config/stackhpc-kayobe-config/venvs/kayobe/lib64/python3.6/site-packages/osc_lib/cli/client_config.py", line 18, in from openstack.config import exceptions as sdk_exceptions File "/home/cloud-user/actions-runner/_work/stackhpc-kayobe-config/stackhpc-kayobe-config/venvs/kayobe/lib64/python3.6/site-packages/openstack/__init__.py", line 58, in import openstack.connection File "/home/cloud-user/actions-runner/_work/stackhpc-kayobe-config/stackhpc-kayobe-config/venvs/kayobe/lib64/python3.6/site-packages/openstack/connection.py", line 217, in from openstack import _services_mixin File "/home/cloud-user/actions-runner/_work/stackhpc-kayobe-config/stackhpc-kayobe-config/venvs/kayobe/lib64/python3.6/site-packages/openstack/_services_mixin.py", line 6, in from openstack.block_storage import block_storage_service File "/home/cloud-user/actions-runner/_work/stackhpc-kayobe-config/stackhpc-kayobe-config/venvs/kayobe/lib64/python3.6/site-packages/openstack/block_storage/block_storage_service.py", line 14, in from openstack.block_storage.v3 import _proxy as _v3_proxy File "/home/cloud-user/actions-runner/_work/stackhpc-kayobe-config/stackhpc-kayobe-config/venvs/kayobe/lib64/python3.6/site-packages/openstack/block_storage/v3/_proxy.py", line 37, in class Proxy(_base_proxy.BaseBlockStorageProxy): File "/home/cloud-user/actions-runner/_work/stackhpc-kayobe-config/stackhpc-kayobe-config/venvs/kayobe/lib64/python3.6/site-packages/openstack/block_storage/v3/_proxy.py", line 1592, in Proxy ignore_missing: ty.Literal[True] = True, AttributeError: module 'typing' has no attribute 'Literal' ``` See: https://github.com/stackhpc/stackhpc-kayobe-config/actions/runs/6341830858/job/17226310409 --- .github/workflows/overcloud-host-image-build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/overcloud-host-image-build.yml b/.github/workflows/overcloud-host-image-build.yml index 70f9fce27..835a7921f 100644 --- a/.github/workflows/overcloud-host-image-build.yml +++ b/.github/workflows/overcloud-host-image-build.yml @@ -143,7 +143,7 @@ jobs: - name: Install OpenStack client run: | source venvs/kayobe/bin/activate && - pip install python-openstackclient + pip install python-openstackclient -c https://opendev.org/openstack/requirements/raw/branch/stable/${{ steps.openstack_release.outputs.openstack_release }}/upper-constraints.txt - name: Build a CentOS Stream 8 overcloud host image id: build_centos_stream_8 From 
e441f497c40124117045883e54139c5ddda4ee8c Mon Sep 17 00:00:00 2001 From: Grzegorz Bialas <93606484+GregWhiteyBialas@users.noreply.github.com> Date: Fri, 29 Sep 2023 15:29:14 +0200 Subject: [PATCH 04/14] Add python and growpart to images (#676) * add python3 to images * add growpart --- etc/kayobe/stackhpc-overcloud-dib.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/etc/kayobe/stackhpc-overcloud-dib.yml b/etc/kayobe/stackhpc-overcloud-dib.yml index aaf8b93df..4c76a5cd4 100644 --- a/etc/kayobe/stackhpc-overcloud-dib.yml +++ b/etc/kayobe/stackhpc-overcloud-dib.yml @@ -67,6 +67,7 @@ stackhpc_overcloud_dib_packages: - "vim" - "git" - "less" + - "python3" - "{% if os_distribution == 'ubuntu' %}netbase{% endif %}" - "{% if os_distribution == 'ubuntu' %}iputils-ping{% endif %}" - "{% if os_distribution == 'ubuntu' %}curl{% endif %}" @@ -74,6 +75,8 @@ stackhpc_overcloud_dib_packages: - "{% if os_distribution == 'centos' %}openssh-clients{% endif %}" - "{% if os_distribution == 'rocky' %}NetworkManager-config-server{% endif %}" - "{% if os_distribution == 'rocky' %}linux-firmware{% endif %}" + - "{% if os_distribution == 'rocky' %}cloud-utils-growpart{% endif %}" + - "{% if os_distribution == 'ubuntu' %}cloud-guest-utils{% endif %}" # StackHPC overcloud DIB image block device configuration. # This image layout conforms to the CIS partition benchmarks. From 0c026e1ba2a0474f3b13e3f21bf1341ef13ef73e Mon Sep 17 00:00:00 2001 From: Will Szumski Date: Fri, 29 Sep 2023 09:40:17 +0100 Subject: [PATCH 05/14] Bump Rocky 9 host image to 9.2 Hitting this issue with cloud-init in 9.1: ``` Unable to find a system nic for from cloud-init ``` This means that network interfaces are not configured properly. Newer cloud-init seems to fix the issue. See: https://askubuntu.com/questions/1400527/unable-to-find-a-system-nic-while-running-cloud-init --- etc/kayobe/pulp-host-image-versions.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/etc/kayobe/pulp-host-image-versions.yml b/etc/kayobe/pulp-host-image-versions.yml index 33650e8a7..028f7f542 100644 --- a/etc/kayobe/pulp-host-image-versions.yml +++ b/etc/kayobe/pulp-host-image-versions.yml @@ -3,6 +3,6 @@ # These images must be in SMS, since they are used by our AIO CI runners stackhpc_centos_8_stream_overcloud_host_image_version: "yoga-20230525T095243" stackhpc_rocky_8_overcloud_host_image_version: "yoga-20230629T135322" -stackhpc_rocky_9_overcloud_host_image_version: "yoga-20230515T145140" +stackhpc_rocky_9_overcloud_host_image_version: "yoga-20230929T080356" stackhpc_ubuntu_focal_overcloud_host_image_version: "yoga-20230609T120720" stackhpc_ubuntu_jammy_overcloud_host_image_version: "yoga-20230609T120720" From b0fbc2dc64d8bb13dfc867ed37067841663e6de5 Mon Sep 17 00:00:00 2001 From: Matt Crees Date: Fri, 29 Sep 2023 15:18:35 +0100 Subject: [PATCH 06/14] Bump Rocky tag for python and growpart additions --- etc/kayobe/pulp-host-image-versions.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/etc/kayobe/pulp-host-image-versions.yml b/etc/kayobe/pulp-host-image-versions.yml index 028f7f542..1d89efb35 100644 --- a/etc/kayobe/pulp-host-image-versions.yml +++ b/etc/kayobe/pulp-host-image-versions.yml @@ -3,6 +3,6 @@ # These images must be in SMS, since they are used by our AIO CI runners stackhpc_centos_8_stream_overcloud_host_image_version: "yoga-20230525T095243" stackhpc_rocky_8_overcloud_host_image_version: "yoga-20230629T135322" -stackhpc_rocky_9_overcloud_host_image_version: "yoga-20230929T080356" 
+stackhpc_rocky_9_overcloud_host_image_version: "yoga-20230929T133006" stackhpc_ubuntu_focal_overcloud_host_image_version: "yoga-20230609T120720" stackhpc_ubuntu_jammy_overcloud_host_image_version: "yoga-20230609T120720" From 8a8e8462070ecc3aeba6e16de3bf498dd94fe083 Mon Sep 17 00:00:00 2001 From: Will Szumski Date: Fri, 29 Sep 2023 14:41:38 +0100 Subject: [PATCH 07/14] Bump Rocky 9 snapshots This brings in the CPU vulnerability fixes for: - Zenbleed - Downfall --- etc/kayobe/pulp-repo-versions.yml | 12 ++++++------ ...rocky9-snapshots-2023-09-29-c736c3d37afd7e5c.yaml | 7 +++++++ 2 files changed, 13 insertions(+), 6 deletions(-) create mode 100644 releasenotes/notes/bump-rocky9-snapshots-2023-09-29-c736c3d37afd7e5c.yaml diff --git a/etc/kayobe/pulp-repo-versions.yml b/etc/kayobe/pulp-repo-versions.yml index 46b8dbbb5..d4df1cad8 100644 --- a/etc/kayobe/pulp-repo-versions.yml +++ b/etc/kayobe/pulp-repo-versions.yml @@ -19,8 +19,8 @@ stackhpc_pulp_repo_centos_stream_9_storage_ceph_pacific_version: 20230308T155704 stackhpc_pulp_repo_docker_ce_ubuntu_version: 20230908T013529 stackhpc_pulp_repo_docker_version: 20230801T003759 stackhpc_pulp_repo_elasticsearch_logstash_kibana_7_x_version: 20230727T144020 -stackhpc_pulp_repo_epel_9_version: 20230302T031902 stackhpc_pulp_repo_elrepo_9_version: 20230907T075311 +stackhpc_pulp_repo_epel_9_version: 20230929T005202 stackhpc_pulp_repo_epel_modular_version: 20220913T043117 stackhpc_pulp_repo_epel_version: 20230206T150339 stackhpc_pulp_repo_grafana_version: 20230903T003752 @@ -46,11 +46,11 @@ stackhpc_pulp_repo_rocky_9_1_baseos_version: 20230228T044432 stackhpc_pulp_repo_rocky_9_1_crb_version: 20230228T044432 stackhpc_pulp_repo_rocky_9_1_extras_version: 20230228T044432 stackhpc_pulp_repo_rocky_9_1_highavailability_version: 20230228T044432 -stackhpc_pulp_repo_rocky_9_2_appstream_version: 20230825T131407 -stackhpc_pulp_repo_rocky_9_2_baseos_version: 20230825T131407 -stackhpc_pulp_repo_rocky_9_2_crb_version: 20230825T131407 -stackhpc_pulp_repo_rocky_9_2_extras_version: 20230825T131407 -stackhpc_pulp_repo_rocky_9_2_highavailability_version: 20230805T012805 +stackhpc_pulp_repo_rocky_9_2_appstream_version: 20230928T024829 +stackhpc_pulp_repo_rocky_9_2_baseos_version: 20230928T024829 +stackhpc_pulp_repo_rocky_9_2_crb_version: 20230928T024829 +stackhpc_pulp_repo_rocky_9_2_extras_version: 20230915T001040 +stackhpc_pulp_repo_rocky_9_2_highavailability_version: 20230918T015928 stackhpc_pulp_repo_treasuredata_4_version: 20230903T003752 stackhpc_pulp_repo_ubuntu_cloud_archive_version: 20230908T112533 stackhpc_pulp_repo_ubuntu_focal_security_version: 20230908T101641 diff --git a/releasenotes/notes/bump-rocky9-snapshots-2023-09-29-c736c3d37afd7e5c.yaml b/releasenotes/notes/bump-rocky9-snapshots-2023-09-29-c736c3d37afd7e5c.yaml new file mode 100644 index 000000000..83a9f5565 --- /dev/null +++ b/releasenotes/notes/bump-rocky9-snapshots-2023-09-29-c736c3d37afd7e5c.yaml @@ -0,0 +1,7 @@ +--- +security: + - | + The snapshots for Rocky 9.2 have been refreshed to include fixes for + Zenbleed (CVE-2023-20593), Downfall (CVE-2022-40982). It is recommended + that you update your OS packages and reboot into the kernel as soon as + possible. From 36af591202f1c1875ecc3e83920bbf0c050aa4f6 Mon Sep 17 00:00:00 2001 From: Pierre Riteau Date: Tue, 3 Oct 2023 16:08:46 +0200 Subject: [PATCH 08/14] Change default to reboot one host at a time This is safer if the playbook is run by accident. Existing behaviour can be retained by setting ANSIBLE_SERIAL=0. 
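As an illustration (assuming the playbook is invoked with kayobe playbook run,
as for the other custom playbooks in this configuration), the serial value can
be overridden per run:

```
# Retain the previous behaviour: reboot all targeted hosts in a single batch.
ANSIBLE_SERIAL=0 kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/reboot.yml

# Or reboot a limited number of hosts at a time, e.g. two per batch.
ANSIBLE_SERIAL=2 kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/reboot.yml
```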
--- etc/kayobe/ansible/reboot.yml | 2 +- .../notes/reboot-default-serial-5944a2a648da71c7.yaml | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 releasenotes/notes/reboot-default-serial-5944a2a648da71c7.yaml diff --git a/etc/kayobe/ansible/reboot.yml b/etc/kayobe/ansible/reboot.yml index a284dd425..8810afd7f 100644 --- a/etc/kayobe/ansible/reboot.yml +++ b/etc/kayobe/ansible/reboot.yml @@ -1,7 +1,7 @@ --- - name: Reboot the host hosts: seed-hypervisor:seed:overcloud:infra-vms - serial: "{{ lookup('env', 'ANSIBLE_SERIAL') | default(0, true) }}" + serial: "{{ lookup('env', 'ANSIBLE_SERIAL') | default(1, true) }}" tags: - reboot tasks: diff --git a/releasenotes/notes/reboot-default-serial-5944a2a648da71c7.yaml b/releasenotes/notes/reboot-default-serial-5944a2a648da71c7.yaml new file mode 100644 index 000000000..7eb2e28cd --- /dev/null +++ b/releasenotes/notes/reboot-default-serial-5944a2a648da71c7.yaml @@ -0,0 +1,6 @@ +--- +upgrade: + - | + The ``reboot.yml`` custom Ansible playbook now defaults to reboot only one + host at a time. Existing behaviour can be retained by setting + ANSIBLE_SERIAL=0. From 3062cc73fb5487b2a62a3fa794c2a9bfbeac7df0 Mon Sep 17 00:00:00 2001 From: Will Szumski Date: Fri, 29 Sep 2023 11:36:19 +0100 Subject: [PATCH 09/14] Bump Rocky 8 snapshots This brings in the CPU vulnerability fixes for: - Zenbleed - Downfall --- etc/kayobe/pulp-repo-versions.yml | 7 ++++++- etc/kayobe/pulp.yml | 4 ++-- .../bump-rocky8-snapshots-2023-09-29-e115427edd3334c7.yaml | 7 +++++++ 3 files changed, 15 insertions(+), 3 deletions(-) create mode 100644 releasenotes/notes/bump-rocky8-snapshots-2023-09-29-e115427edd3334c7.yaml diff --git a/etc/kayobe/pulp-repo-versions.yml b/etc/kayobe/pulp-repo-versions.yml index 46b8dbbb5..f2f8b6b25 100644 --- a/etc/kayobe/pulp-repo-versions.yml +++ b/etc/kayobe/pulp-repo-versions.yml @@ -22,7 +22,7 @@ stackhpc_pulp_repo_elasticsearch_logstash_kibana_7_x_version: 20230727T144020 stackhpc_pulp_repo_epel_9_version: 20230302T031902 stackhpc_pulp_repo_elrepo_9_version: 20230907T075311 stackhpc_pulp_repo_epel_modular_version: 20220913T043117 -stackhpc_pulp_repo_epel_version: 20230206T150339 +stackhpc_pulp_repo_epel_version: 20230929T005202 stackhpc_pulp_repo_grafana_version: 20230903T003752 stackhpc_pulp_repo_mariadb_10_6_centos8_version: 20230815T010124 stackhpc_pulp_repo_mlnx_ofed_5_7_1_0_2_0_rhel8_6_version: 20220920T151419 @@ -41,6 +41,11 @@ stackhpc_pulp_repo_rocky_8_7_baseos_version: 20221202T032715 stackhpc_pulp_repo_rocky_8_7_extras_version: 20221201T192704 stackhpc_pulp_repo_rocky_8_7_nfv_version: 20221202T032715 stackhpc_pulp_repo_rocky_8_7_powertools_version: 20221202T032715 +stackhpc_pulp_repo_rocky_8_8_appstream_version: 20230928T024829 +stackhpc_pulp_repo_rocky_8_8_baseos_version: 20230928T024829 +stackhpc_pulp_repo_rocky_8_8_extras_version: 20230928T024829 +stackhpc_pulp_repo_rocky_8_8_nfv_version: 20230922T023520 +stackhpc_pulp_repo_rocky_8_8_powertools_version: 20230928T024829 stackhpc_pulp_repo_rocky_9_1_appstream_version: 20230228T044432 stackhpc_pulp_repo_rocky_9_1_baseos_version: 20230228T044432 stackhpc_pulp_repo_rocky_9_1_crb_version: 20230228T044432 diff --git a/etc/kayobe/pulp.yml b/etc/kayobe/pulp.yml index 4ec4cfb82..35837c22f 100644 --- a/etc/kayobe/pulp.yml +++ b/etc/kayobe/pulp.yml @@ -217,8 +217,8 @@ stackhpc_pulp_sync_centos_stream8: "{{ os_distribution == 'centos' }}" # Whether to sync Rocky Linux 8 packages. 
stackhpc_pulp_sync_rocky_8: "{{ os_distribution == 'rocky' and os_release == '8' }}" -# Rocky 8 minor version number. Supported values: 6, 7 -stackhpc_pulp_repo_rocky_8_minor_version: 7 +# Rocky 8 minor version number. Supported values: 6, 7, 8 +stackhpc_pulp_repo_rocky_8_minor_version: 8 # Rocky 8 Snapshot versions. The defaults use the appropriate version from # pulp-repo-versions.yml for the selected minor release. stackhpc_pulp_repo_rocky_8_appstream_version: "{{ lookup('vars', 'stackhpc_pulp_repo_rocky_8_%s_appstream_version' % stackhpc_pulp_repo_rocky_8_minor_version) }}" diff --git a/releasenotes/notes/bump-rocky8-snapshots-2023-09-29-e115427edd3334c7.yaml b/releasenotes/notes/bump-rocky8-snapshots-2023-09-29-e115427edd3334c7.yaml new file mode 100644 index 000000000..f44c44d98 --- /dev/null +++ b/releasenotes/notes/bump-rocky8-snapshots-2023-09-29-e115427edd3334c7.yaml @@ -0,0 +1,7 @@ +--- +security: + - | + The Rocky 8 minor version has been bumped to 8.8 and new snapshots have + been created to include fixes for Zenbleed (CVE-2023-20593), Downfall + (CVE-2022-40982). It is recommended that you update your OS packages and + reboot into the kernel as soon as possible. From ffe9c4b308420ed659a622114b0969ecdd647efe Mon Sep 17 00:00:00 2001 From: Alex-Welsh Date: Wed, 4 Oct 2023 12:39:06 +0100 Subject: [PATCH 10/14] Add debugging info to tls deployment docs --- doc/source/configuration/vault.rst | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/doc/source/configuration/vault.rst b/doc/source/configuration/vault.rst index 1db3689d1..0538a0256 100644 --- a/doc/source/configuration/vault.rst +++ b/doc/source/configuration/vault.rst @@ -229,6 +229,18 @@ Enable the required TLS variables in kayobe and kolla kayobe overcloud service deploy + If VM provisioning fails with an error with this format: + + .. code-block:: + + Unable to establish connection to http://:9696/v2.0/ports/some-sort-of-uuid: Connection aborted + + Restart the nova-compute container on all hypervisors: + + .. 
code-block:: + + kayobe overcloud host command run --command "docker restart nova_compute" --become --show-output -l compute + Barbican integration ==================== From ec606c672a73ef9e569df3946803572da4b70212 Mon Sep 17 00:00:00 2001 From: Alex-Welsh Date: Wed, 27 Sep 2023 16:57:37 +0100 Subject: [PATCH 11/14] Fail container build workflow when no images build --- .github/workflows/stackhpc-container-image-build.yml | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/.github/workflows/stackhpc-container-image-build.yml b/.github/workflows/stackhpc-container-image-build.yml index 0be944c84..3aaab4e9e 100644 --- a/.github/workflows/stackhpc-container-image-build.yml +++ b/.github/workflows/stackhpc-container-image-build.yml @@ -167,6 +167,10 @@ jobs: env: KAYOBE_VAULT_PASSWORD: ${{ secrets.KAYOBE_VAULT_PASSWORD }} + - name: Prune local Kolla container images over 1 week old + run: | + sudo docker image prune --all --force --filter until=168h --filter="label=kolla_version" + - name: Build and push kolla overcloud images run: | args="${{ github.event.inputs.regexes }}" @@ -200,6 +204,9 @@ jobs: run: | sudo docker image ls --filter "reference=ark.stackhpc.com/stackhpc-dev/${{ matrix.distro }}-*:${{ needs.generate-tag.outputs.kolla_tag }}" > ${{ matrix.distro }}-container-images + - name: Fail if no images have been built + run: if [ $(wc -l < ${{ matrix.distro }}-container-images) -le 1 ]; then exit 1; fi + - name: Upload container images artifact uses: actions/upload-artifact@v3 with: @@ -207,10 +214,6 @@ jobs: path: ${{ matrix.distro }}-container-images retention-days: 7 - - name: Prune local Kolla container images over 1 week old - run: | - sudo docker image prune --all --force --filter until=168h --filter="label=kolla_version" - sync-container-repositories: name: Trigger container image repository sync needs: From 47efa99702debbce97a8b608de94fff60adedc84 Mon Sep 17 00:00:00 2001 From: Alex-Welsh Date: Thu, 5 Oct 2023 11:30:30 +0100 Subject: [PATCH 12/14] Update walled garden guide no_proxy defaults --- doc/source/configuration/walled-garden.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/source/configuration/walled-garden.rst b/doc/source/configuration/walled-garden.rst index 9a45ea4db..937619a5a 100644 --- a/doc/source/configuration/walled-garden.rst +++ b/doc/source/configuration/walled-garden.rst @@ -77,7 +77,8 @@ proxy: - "127.0.0.1" - "localhost" - "{{ ('http://' ~ docker_registry) | urlsplit('hostname') if docker_registry else '' }}" - - "{{ admin_oc_net_name | net_ip(inventory_hostname=groups['seed'][0]) }}" + - "{{ lookup('vars', admin_oc_net_name ~ '_ips')[groups.seed.0] }}" + - "{{ lookup('vars', admin_oc_net_name ~ '_ips')[inventory_hostname] }}" - "{{ kolla_external_fqdn }}" - "{{ kolla_internal_fqdn }}" From 7bb0de408a63112f2e67d4d768eb8733cec67baa Mon Sep 17 00:00:00 2001 From: Grzegorz Koper Date: Fri, 6 Oct 2023 10:00:58 +0200 Subject: [PATCH 13/14] Moving bifrost config into its proper folder. 
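For context, a sketch of the intended layout (assuming kolla-ansible's usual
custom configuration lookup, where bifrost overrides are read from a bifrost
subdirectory of the custom config directory):

```
etc/kayobe/kolla/config/
└── bifrost/
    └── bifrost.yml   # expected to be picked up as the bifrost/bifrost.yml override
```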
--- etc/kayobe/kolla/config/{ => bifrost}/bifrost.yml | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename etc/kayobe/kolla/config/{ => bifrost}/bifrost.yml (100%) diff --git a/etc/kayobe/kolla/config/bifrost.yml b/etc/kayobe/kolla/config/bifrost/bifrost.yml similarity index 100% rename from etc/kayobe/kolla/config/bifrost.yml rename to etc/kayobe/kolla/config/bifrost/bifrost.yml From 1722ffa2e7b411f3a0ecaae9c992ffe26a721848 Mon Sep 17 00:00:00 2001 From: Mark Goddard Date: Wed, 27 Sep 2023 16:17:11 +0100 Subject: [PATCH 14/14] docs: fix wazuh headings --- doc/source/configuration/wazuh.rst | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/doc/source/configuration/wazuh.rst b/doc/source/configuration/wazuh.rst index 8020d154a..1ba1574b2 100644 --- a/doc/source/configuration/wazuh.rst +++ b/doc/source/configuration/wazuh.rst @@ -2,8 +2,8 @@ Wazuh ===== -Wazuh Manager -============= +Wazuh Manager Host +================== Provision using infra-vms ------------------------- @@ -288,7 +288,7 @@ Encrypt the keys (and remember to commit to git): ``ansible-vault encrypt --vault-password-file ~/vault.pass $KAYOBE_CONFIG_PATH/ansible/wazuh/certificates/certs/*.key`` Verification -============== +------------ The Wazuh portal should be accessible on port 443 of the Wazuh manager’s IPs (using HTTPS, with the root CA cert in ``etc/kayobe/ansible/wazuh/certificates/wazuh-certificates/root-ca.pem``). @@ -300,11 +300,9 @@ Troubleshooting Logs are in ``/var/log/wazuh-indexer/wazuh.log``. There are also logs in the journal. -============ Wazuh agents ============ - Wazuh agent playbook is located in ``etc/kayobe/ansible/wazuh-agent.yml``. Wazuh agent variables file is located in ``etc/kayobe/inventory/group_vars/wazuh-agent/wazuh-agent``. @@ -318,13 +316,13 @@ Deploy the Wazuh agents: ``kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/wazuh-agent.yml`` Verification -============= +------------ The Wazuh agents should register with the Wazuh manager. This can be verified via the agents page in Wazuh Portal. Check CIS benchmark output in agent section. -Additional resources: -===================== +Additional resources +-------------------- For times when you need to upgrade wazuh with elasticsearch to version with opensearch or you just need to deinstall all wazuh components: Wazuh purge script: https://github.com/stackhpc/wazuh-server-purge