- to test this change ha_cluster system role is used to set up a degenerate cluster on localhost
diff --git a/defaults/main.yml b/defaults/main.yml index 7703c982..755364ae 100644 --- a/defaults/main.yml +++ b/defaults/main.yml @@ -27,6 +27,8 @@ storage_pool_defaults: raid_chunk_size: null raid_metadata_version: null + shared: false + storage_volume_defaults: state: "present" type: lvm diff --git a/library/blivet.py b/library/blivet.py index 5e03a9d8..db4d398f 100644 --- a/library/blivet.py +++ b/library/blivet.py @@ -1527,7 +1527,7 @@ def _create(self): if not self._device: members = self._manage_encryption(self._create_members()) try: - pool_device = self._blivet.new_vg(name=self._pool['name'], parents=members) + pool_device = self._blivet.new_vg(name=self._pool['name'], parents=members, shared=self._pool['shared']) except Exception as e: raise BlivetAnsibleError("failed to set up pool '%s': %s" % (self._pool['name'], str(e))) @@ -1823,6 +1823,7 @@ def run_module(): raid_spare_count=dict(type='int'), raid_metadata_version=dict(type='str'), raid_chunk_size=dict(type='str'), + shared=dict(type='bool'), state=dict(type='str', default='present', choices=['present', 'absent']), type=dict(type='str'), volumes=dict(type='list', elements='dict', default=list(), diff --git a/tests/collection-requirements.yml b/tests/collection-requirements.yml new file mode 100644 index 00000000..f85d0521 --- /dev/null +++ b/tests/collection-requirements.yml @@ -0,0 +1,3 @@ +--- +collections: + - fedora.linux_system_roles diff --git a/tests/test-verify-pool.yml b/tests/test-verify-pool.yml index 55efa196..2ba1b4db 100644 --- a/tests/test-verify-pool.yml +++ b/tests/test-verify-pool.yml @@ -15,6 +15,20 @@ # compression # deduplication +- name: Get VG shared value status + command: vgs --noheadings --binary -o shared {{ storage_test_pool.name }} + register: vgs_dump + when: storage_test_pool.type == 'lvm' and storage_test_pool.state == 'present' + changed_when: false + +- name: Verify that VG shared value checks out + assert: + that: (storage_test_pool.shared | bool) and ('1' in 
vgs_dump.stdout) + msg: >- + Shared VG presence ({{ storage_test_pool.shared }}) + does not match its expected state ({{ '1' in vgs_dump.stdout }}) + when: storage_test_pool.type == 'lvm' and storage_test_pool.state == 'present' + - name: Verify pool subset include_tasks: "test-verify-pool-{{ storage_test_pool_subset }}.yml" loop: "{{ _storage_pool_tests }}" diff --git a/tests/tests_lvm_pool_shared.yml b/tests/tests_lvm_pool_shared.yml new file mode 100644 index 00000000..9a0c04c1 --- /dev/null +++ b/tests/tests_lvm_pool_shared.yml @@ -0,0 +1,152 @@ +--- +- name: Test LVM shared pools + hosts: all + become: true + vars: + storage_safe_mode: false + storage_use_partitions: true + mount_location1: '/opt/test1' + volume1_size: '4g' + + tasks: + - name: Change node from 'localhost' to '127.0.0.1' + set_fact: + inventory_hostname: "127.0.0.1" # noqa: var-naming + when: inventory_hostname == "localhost" + + - name: Run the role to install blivet + include_role: + name: linux-system-roles.storage + + - name: Gather package facts + package_facts: + + - name: Set blivet package name + set_fact: + blivet_pkg_name: "{{ ansible_facts.packages | + select('search', 'blivet') | select('search', 'python') | list }}" + + - name: Set blivet package version + set_fact: + blivet_pkg_version: "{{ + ansible_facts.packages[blivet_pkg_name[0]][0]['version'] + + '-' + ansible_facts.packages[blivet_pkg_name[0]][0]['release'] }}" + + - name: Set distribution version + set_fact: + is_rhel9: "{{ (ansible_facts.distribution == 'CentOS' or + ansible_facts.distribution == 'Enterprise Linux' or + ansible_facts.distribution == 'RedHat') and + ansible_facts.distribution_major_version == '9' }}" + is_rhel8: "{{ (ansible_facts.distribution == 'CentOS' or + ansible_facts.distribution == 'Enterprise Linux' or + ansible_facts.distribution == 'RedHat') and + ansible_facts.distribution_major_version == '8' }}" + is_fedora: "{{ ansible_facts.distribution == 'Fedora' }}" + + - name: Skip test if the blivet 
version does not support shared VGs + meta: end_host + when: ((is_fedora and blivet_pkg_version is version("3.8.2-1", "<")) or + (is_rhel8 and blivet_pkg_version is version("3.6.0-8", "<")) or + (is_rhel9 and blivet_pkg_version is version("3.6.0-11", "<"))) + + - name: Create cluster + ansible.builtin.include_role: + name: fedora.linux_system_roles.ha_cluster + vars: + ha_cluster_cluster_name: rhel9-1node + # Users should vault-encrypt the password + ha_cluster_hacluster_password: hapasswd + ha_cluster_extra_packages: + - dlm + - lvm2-lockd + ha_cluster_cluster_properties: + - attrs: + # Don't do this in production + - name: stonith-enabled + value: 'false' + ha_cluster_resource_primitives: + - id: dlm + agent: 'ocf:pacemaker:controld' + instance_attrs: + - attrs: + # Don't do this in production + - name: allow_stonith_disabled + value: 'true' + - id: lvmlockd + agent: 'ocf:heartbeat:lvmlockd' + ha_cluster_resource_groups: + - id: locking + resource_ids: + - dlm + - lvmlockd + + - name: Get unused disks + include_tasks: get_unused_disk.yml + vars: + max_return: 1 + + - name: >- + Create a disk device; specify disks as non-list mounted on + {{ mount_location }} + include_role: + name: linux-system-roles.storage + vars: + storage_pools: + - name: vg1 + disks: "{{ unused_disks }}" + type: lvm + shared: true + state: present + volumes: + - name: lv1 + size: "{{ volume1_size }}" + mount_point: "{{ mount_location1 }}" + + - name: Verify role results + include_tasks: verify-role-results.yml + + - name: Repeat the previous step to verify idempotence + include_role: + name: linux-system-roles.storage + vars: + storage_pools: + - name: vg1 + disks: "{{ unused_disks }}" + type: lvm + shared: true + state: present + volumes: + - name: lv1 + size: "{{ volume1_size }}" + mount_point: "{{ mount_location1 }}" + + - name: Verify role results + include_tasks: verify-role-results.yml + + - name: >- + Remove the device created above + {{ mount_location }} + include_role: + name: 
+                        # Keeps compatibility with older blivet versions that do not support shared VGs
that VG shared value checks out assert: - that: (storage_test_pool.shared | bool) and ('1' in vgs_dump.stdout) + that: (storage_test_pool.shared | bool) == ('1' in vgs_dump.stdout) msg: >- Shared VG presence ({{ storage_test_pool.shared }}) does not match its expected state ({{ '1' in vgs_dump.stdout }})