From a67f6b81e4e947a66d832c143554a76e2ef3a123 Mon Sep 17 00:00:00 2001
From: sean-freeman <1815807+sean-freeman@users.noreply.github.com>
Date: Thu, 1 Feb 2024 23:03:34 +0000
Subject: [PATCH] init
---
.ansible-lint | 32 +
.gitattributes | 1 +
...-lint-sap_hypervisor_node_preconfigure.yml | 43 +
.../ansible-lint-sap_vm_preconfigure.yml | 43 +
.../ansible-lint-sap_vm_provision.yml | 46 +
.../ansible-lint-sap_vm_temp_vip.yml | 45 +
.../workflows/ansible-lint-sap_vm_verify.yml | 46 +
.github/workflows/ansible-lint.yml | 54 +
.github/workflows/ansible-test-sanity.yml | 53 +
.github/workflows/codespell.yml | 21 +
.gitignore | 64 +
.pre-commit-config.yaml | 39 +
.pylintrc | 3 +
.yamllint.yml | 21 +
CHANGELOG.rst | 14 +
LICENSE | 201 ++++
README.md | 37 +
docs/CONTRIBUTORS.md | 17 +
docs/README.md | 100 ++
galaxy.yml | 66 ++
meta/runtime.yml | 2 +
...ypervisor-redhat-ocp-virt-preconfigure.yml | 19 +
...ypervisor-redhat-ocp-virt-preconfigure.yml | 127 ++
requirements-dev.txt | 1 +
requirements.yml | 37 +
.../.ansible-lint | 16 +
.../.yamllint.yml | 21 +
.../README.md | 135 +++
.../defaults/main.yml | 123 ++
.../files/platform/ibmpower_phyp/.gitkeep | 0
.../files/platform/redhat_ocp_virt/.gitkeep | 0
.../files/platform/vmware_vsphere/.gitkeep | 0
.../handlers/main.yml | 3 +
.../handlers/platform/ibmpower_phyp/.gitkeep | 0
.../platform/redhat_ocp_virt/.gitkeep | 0
.../platform/redhat_rhel_kvm/main.yml | 93 ++
.../handlers/platform/vmware_vsphere/.gitkeep | 0
.../meta/main.yml | 14 +
.../tasks/main.yml | 6 +
.../tasks/platform/ibmpower_phyp/.gitkeep | 0
.../redhat_ocp_virt/99-kargs-worker.yml.j2 | 17 +
.../redhat_ocp_virt/configure-worker-node.yml | 19 +
.../redhat_ocp_virt/create-sap-bridge.yml | 49 +
.../redhat_ocp_virt/install-cnv-operator.yml | 74 ++
.../platform/redhat_ocp_virt/install-hpp.yml | 89 ++
.../install-nmstate-operator.yml | 70 ++
.../install-sriov-operator.yml | 54 +
.../redhat_ocp_virt/install-trident.yml | 48 +
.../redhat_ocp_virt/install-virtctl.yml | 15 +
.../tasks/platform/redhat_ocp_virt/kargs.yml | 11 +
.../redhat_ocp_virt/label-worker-invtsc.yml | 11 +
.../tasks/platform/redhat_ocp_virt/main.yml | 104 ++
.../platform/redhat_ocp_virt/node-network.yml | 103 ++
.../platform/redhat_ocp_virt/prepare.yml | 16 +
.../redhat_ocp_virt/setup-worker-nodes.yml | 81 ++
.../sriov-enabled-unsupported-nics.sh | 3 +
.../redhat_ocp_virt/trident-backend.json.j2 | 18 +
.../redhat_ocp_virt/tuned-virtual-host.yml | 21 +
.../tasks/platform/redhat_rhel_kvm/50_hana | 65 ++
.../redhat_rhel_kvm/50_iothread_pinning | 64 +
.../allocate-hugepages-at-runtime.yml | 27 +
.../redhat_rhel_kvm/assert-configuration.yml | 136 +++
.../redhat_rhel_kvm/assert-installation.yml | 14 +
.../redhat_rhel_kvm/assert-rhv-hooks.yml | 37 +
.../assert-set-tuned-profile.yml | 14 +
.../redhat_rhel_kvm/configuration.yml | 132 +++
.../platform/redhat_rhel_kvm/installation.yml | 7 +
.../tasks/platform/redhat_rhel_kvm/main.yml | 28 +
.../platform/redhat_rhel_kvm/rhv-hooks.yml | 16 +
.../redhat_rhel_kvm/set-tuned-profile.yml | 44 +
.../tasks/platform/vmware_vsphere/.gitkeep | 0
.../vars/main.yml | 1 +
.../platform_defaults_redhat_ocp_virt.yml | 30 +
.../platform_defaults_redhat_rhel_kvm.yml | 39 +
roles/sap_vm_preconfigure/.ansible-lint | 16 +
roles/sap_vm_preconfigure/.yamllint.yml | 21 +
roles/sap_vm_preconfigure/README.md | 97 ++
roles/sap_vm_preconfigure/defaults/main.yml | 11 +
.../platform/cloud_aliyun_ecs_vm/.gitkeep | 0
.../files/platform/cloud_aws_ec2_vs/.gitkeep | 0
.../files/platform/cloud_gcp_ce_vm/.gitkeep | 0
.../platform/cloud_ibmcloud_powervs/.gitkeep | 0
.../files/platform/cloud_ibmcloud_vs/.gitkeep | 0
.../files/platform/cloud_msazure_vm/.gitkeep | 0
.../files/platform/ibmpower_lpar/.gitkeep | 0
.../platform/redhat_ocp_virt_vm/.gitkeep | 0
.../tuned/sap-hana-kvm-guest/haltpoll.sh | 7 +
.../tuned/sap-hana-kvm-guest/tuned.conf | 24 +
.../tuned/sap-hana/tuned.conf | 24 +
.../files/platform/vmware_vsphere_vm/.gitkeep | 0
roles/sap_vm_preconfigure/handlers/main.yml | 4 +
.../platform/cloud_aliyun_ecs_vm/.gitkeep | 0
.../platform/cloud_aws_ec2_vs/.gitkeep | 0
.../platform/cloud_gcp_ce_vm/.gitkeep | 0
.../platform/cloud_ibmcloud_powervs/.gitkeep | 0
.../platform/cloud_ibmcloud_vs/.gitkeep | 0
.../platform/cloud_msazure_vm/.gitkeep | 0
.../handlers/platform/ibmpower_lpar/.gitkeep | 0
.../platform/redhat_ocp_virt_vm/.gitkeep | 0
.../platform/redhat_rhel_kvm_vm/main.yml | 94 ++
.../platform/vmware_vsphere_vm/.gitkeep | 0
roles/sap_vm_preconfigure/meta/main.yml | 10 +
.../tasks/detect_platform/main.yml | 131 +++
roles/sap_vm_preconfigure/tasks/main.yml | 20 +
.../platform/cloud_aliyun_ecs_vm/.gitkeep | 0
.../tasks/platform/cloud_aws_ec2_vs/.gitkeep | 0
.../tasks/platform/cloud_gcp_ce_vm/.gitkeep | 0
.../platform/cloud_ibmcloud_powervs/.gitkeep | 0
.../tasks/platform/cloud_ibmcloud_vs/.gitkeep | 0
.../tasks/platform/cloud_msazure_vm/.gitkeep | 0
.../tasks/platform/hyp_ibmpower_lpar/.gitkeep | 0
.../platform/hyp_redhat_ocp_virt_vm/.gitkeep | 0
.../assert-set-tuned-profile.yml | 14 +
.../platform/hyp_redhat_rhel_kvm_vm/main.yml | 18 +
.../set-tuned-profile.yml | 91 ++
.../platform/hyp_vmware_vsphere_vm/.gitkeep | 0
roles/sap_vm_preconfigure/vars/main.yml | 1 +
.../platform_defaults_redhat_rhel_kvm.yml | 11 +
roles/sap_vm_provision/PLATFORM_GUIDANCE.md | 299 +++++
roles/sap_vm_provision/README.md | 183 +++
roles/sap_vm_provision/defaults/main.yml | 572 +++++++++
roles/sap_vm_provision/meta/main.yml | 13 +
roles/sap_vm_provision/meta/runtime.yml | 2 +
.../tasks/common/register_os.yml | 92 ++
.../tasks/common/register_proxy.yml | 64 +
.../tasks/common/set_ansible_vars.yml | 165 +++
.../tasks/common/set_ansible_vars_storage.yml | 42 +
.../tasks/common/set_etc_hosts.yml | 97 ++
.../tasks/common/set_etc_hosts_ha.yml | 161 +++
.../tasks/common/set_etc_hosts_scaleout.yml | 62 +
roles/sap_vm_provision/tasks/main.yml | 24 +
.../aws_ec2_vs/execute_main.yml | 138 +++
.../aws_ec2_vs/execute_provision.yml | 183 +++
.../aws_ec2_vs/execute_setup_ha.yml | 443 +++++++
.../aws_ec2_vs/post_deployment_execute.yml | 5 +
.../gcp_ce_vm/execute_main.yml | 260 +++++
.../gcp_ce_vm/execute_provision.yml | 197 ++++
.../gcp_ce_vm/execute_setup_ha.yml | 767 ++++++++++++
.../gcp_ce_vm/post_deployment_execute.yml | 110 ++
.../ibmcloud_powervs/execute_main.yml | 261 +++++
.../ibmcloud_powervs/execute_provision.yml | 205 ++++
.../post_deployment_execute.yml | 5 +
.../ibmcloud_vs/execute_main.yml | 176 +++
.../ibmcloud_vs/execute_provision.yml | 179 +++
.../ibmcloud_vs/execute_setup_ha.yml | 1027 +++++++++++++++++
.../ibmcloud_vs/post_deployment_execute.yml | 311 +++++
.../ibmpowervm_vm/execute_main.yml | 127 ++
.../ibmpowervm_vm/execute_provision.yml | 357 ++++++
.../ibmpowervm_vm/post_deployment_execute.yml | 5 +
.../kubevirt_vm/execute_main.yml | 116 ++
.../kubevirt_vm/execute_provision.yml | 270 +++++
.../kubevirt_vm/post_deployment_execute.yml | 5 +
.../msazure_vm/execute_main.yml | 167 +++
.../msazure_vm/execute_provision.yml | 258 +++++
.../msazure_vm/execute_setup_ha.yml | 562 +++++++++
.../msazure_vm/post_deployment_execute.yml | 278 +++++
.../ovirt_vm/execute_main.yml | 101 ++
.../ovirt_vm/execute_provision.yml | 297 +++++
.../ovirt_vm/post_deployment_execute.yml | 5 +
.../vmware_vm/execute_main.yml | 107 ++
.../vmware_vm/execute_provision.yml | 378 ++++++
.../vmware_vm/post_deployment_execute.yml | 5 +
.../aws_ec2_vs/execute_main.yml | 184 +++
.../aws_ec2_vs/tf_template/tf_template.tf | 205 ++++
.../tf_template/tf_template_input_vars.tf | 234 ++++
.../tf_template/tf_template_outputs.tf | 61 +
.../gcp_ce_vm/execute_main.yml | 180 +++
.../gcp_ce_vm/tf_template/tf_template.tf | 201 ++++
.../tf_template/tf_template_input_vars.tf | 267 +++++
.../tf_template/tf_template_outputs.tf | 61 +
.../ibmcloud_powervs/execute_main.yml | 180 +++
.../tf_template/tf_template.tf | 319 +++++
.../tf_template/tf_template_input_vars.tf | 347 ++++++
.../tf_template/tf_template_outputs.tf | 44 +
.../ibmcloud_vs/execute_main.yml | 180 +++
.../ibmcloud_vs/tf_template/tf_template.tf | 223 ++++
.../tf_template/tf_template_input_vars.tf | 258 +++++
.../tf_template/tf_template_outputs.tf | 61 +
.../ibmpowervm_vm/execute_main.yml | 182 +++
.../ibmpowervm_vm/tf_template/tf_template.tf | 86 ++
.../tf_template/tf_template_input_vars.tf | 182 +++
.../tf_template/tf_template_outputs.tf | 24 +
.../kubevirt_vm/execute_main.yml | 5 +
.../msazure_vm/execute_main.yml | 185 +++
.../msazure_vm/tf_template/tf_template.tf | 263 +++++
.../tf_template/tf_template_input_vars.tf | 267 +++++
.../tf_template/tf_template_outputs.tf | 61 +
.../ovirt_vm/execute_main.yml | 5 +
.../vmware_vm/execute_main.yml | 183 +++
.../vmware_vm/tf_template/tf_template.tf | 85 ++
.../tf_template/tf_template_input_vars.tf | 168 +++
.../tf_template/tf_template_outputs.tf | 24 +
roles/sap_vm_temp_vip/README.md | 81 ++
roles/sap_vm_temp_vip/defaults/main.yml | 8 +
roles/sap_vm_temp_vip/meta/main.yml | 13 +
roles/sap_vm_temp_vip/meta/runtime.yml | 2 +
.../tasks/identify_network_interface.yml | 31 +
roles/sap_vm_temp_vip/tasks/main.yml | 14 +
roles/sap_vm_temp_vip/tasks/set_temp_vip.yml | 126 ++
.../tasks/set_temp_vip_lb_listener.yml | 48 +
roles/sap_vm_verify/README.md | 75 ++
roles/sap_vm_verify/defaults/main.yml | 26 +
roles/sap_vm_verify/meta/main.yml | 13 +
roles/sap_vm_verify/meta/runtime.yml | 2 +
.../tasks/check_network_interconnectivity.yml | 252 ++++
.../tasks/check_network_performance.yml | 3 +
.../tasks/check_storage_generic.yml | 19 +
.../sap_vm_verify/tasks/check_storage_nfs.yml | 24 +
.../tasks/check_storage_performance.yml | 3 +
roles/sap_vm_verify/tasks/main.yml | 22 +
.../tasks/platform/aws_ec2_vs/.gitkeep | 0
.../tasks/platform/gcp_ce_vm/.gitkeep | 0
.../tasks/platform/ibmcloud_powervs/.gitkeep | 0
.../tasks/platform/ibmcloud_vs/.gitkeep | 0
.../tasks/platform/ibmpowervm_vm/.gitkeep | 0
.../tasks/platform/kubevirt_vm/.gitkeep | 0
.../tasks/platform/msazure_vm/.gitkeep | 0
.../tasks/platform/ovirt_vm/.gitkeep | 0
.../tasks/platform_checks_temp.yml | 11 +
219 files changed, 18173 insertions(+)
create mode 100644 .ansible-lint
create mode 100644 .gitattributes
create mode 100644 .github/workflows/ansible-lint-sap_hypervisor_node_preconfigure.yml
create mode 100644 .github/workflows/ansible-lint-sap_vm_preconfigure.yml
create mode 100644 .github/workflows/ansible-lint-sap_vm_provision.yml
create mode 100644 .github/workflows/ansible-lint-sap_vm_temp_vip.yml
create mode 100644 .github/workflows/ansible-lint-sap_vm_verify.yml
create mode 100644 .github/workflows/ansible-lint.yml
create mode 100644 .github/workflows/ansible-test-sanity.yml
create mode 100644 .github/workflows/codespell.yml
create mode 100644 .gitignore
create mode 100644 .pre-commit-config.yaml
create mode 100644 .pylintrc
create mode 100644 .yamllint.yml
create mode 100644 CHANGELOG.rst
create mode 100644 LICENSE
create mode 100644 README.md
create mode 100644 docs/CONTRIBUTORS.md
create mode 100644 docs/README.md
create mode 100644 galaxy.yml
create mode 100644 meta/runtime.yml
create mode 100644 playbooks/sample-sap-hypervisor-redhat-ocp-virt-preconfigure.yml
create mode 100644 playbooks/vars/sample-variables-sap-hypervisor-redhat-ocp-virt-preconfigure.yml
create mode 100644 requirements-dev.txt
create mode 100644 requirements.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/.ansible-lint
create mode 100644 roles/sap_hypervisor_node_preconfigure/.yamllint.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/README.md
create mode 100644 roles/sap_hypervisor_node_preconfigure/defaults/main.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/files/platform/ibmpower_phyp/.gitkeep
create mode 100644 roles/sap_hypervisor_node_preconfigure/files/platform/redhat_ocp_virt/.gitkeep
create mode 100644 roles/sap_hypervisor_node_preconfigure/files/platform/vmware_vsphere/.gitkeep
create mode 100644 roles/sap_hypervisor_node_preconfigure/handlers/main.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/handlers/platform/ibmpower_phyp/.gitkeep
create mode 100644 roles/sap_hypervisor_node_preconfigure/handlers/platform/redhat_ocp_virt/.gitkeep
create mode 100644 roles/sap_hypervisor_node_preconfigure/handlers/platform/redhat_rhel_kvm/main.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/handlers/platform/vmware_vsphere/.gitkeep
create mode 100644 roles/sap_hypervisor_node_preconfigure/meta/main.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/main.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/ibmpower_phyp/.gitkeep
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/99-kargs-worker.yml.j2
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/configure-worker-node.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/create-sap-bridge.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-cnv-operator.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-hpp.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-nmstate-operator.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-sriov-operator.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-trident.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-virtctl.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/kargs.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/label-worker-invtsc.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/main.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/node-network.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/prepare.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/setup-worker-nodes.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/sriov-enabled-unsupported-nics.sh
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/trident-backend.json.j2
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/tuned-virtual-host.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/50_hana
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/50_iothread_pinning
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/allocate-hugepages-at-runtime.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-configuration.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-installation.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-rhv-hooks.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-set-tuned-profile.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/configuration.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/installation.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/main.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/rhv-hooks.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/set-tuned-profile.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/tasks/platform/vmware_vsphere/.gitkeep
create mode 100644 roles/sap_hypervisor_node_preconfigure/vars/main.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/vars/platform_defaults_redhat_ocp_virt.yml
create mode 100644 roles/sap_hypervisor_node_preconfigure/vars/platform_defaults_redhat_rhel_kvm.yml
create mode 100644 roles/sap_vm_preconfigure/.ansible-lint
create mode 100644 roles/sap_vm_preconfigure/.yamllint.yml
create mode 100644 roles/sap_vm_preconfigure/README.md
create mode 100644 roles/sap_vm_preconfigure/defaults/main.yml
create mode 100644 roles/sap_vm_preconfigure/files/platform/cloud_aliyun_ecs_vm/.gitkeep
create mode 100644 roles/sap_vm_preconfigure/files/platform/cloud_aws_ec2_vs/.gitkeep
create mode 100644 roles/sap_vm_preconfigure/files/platform/cloud_gcp_ce_vm/.gitkeep
create mode 100644 roles/sap_vm_preconfigure/files/platform/cloud_ibmcloud_powervs/.gitkeep
create mode 100644 roles/sap_vm_preconfigure/files/platform/cloud_ibmcloud_vs/.gitkeep
create mode 100644 roles/sap_vm_preconfigure/files/platform/cloud_msazure_vm/.gitkeep
create mode 100644 roles/sap_vm_preconfigure/files/platform/ibmpower_lpar/.gitkeep
create mode 100644 roles/sap_vm_preconfigure/files/platform/redhat_ocp_virt_vm/.gitkeep
create mode 100644 roles/sap_vm_preconfigure/files/platform/redhat_rhel_kvm_vm/tuned/sap-hana-kvm-guest/haltpoll.sh
create mode 100644 roles/sap_vm_preconfigure/files/platform/redhat_rhel_kvm_vm/tuned/sap-hana-kvm-guest/tuned.conf
create mode 100644 roles/sap_vm_preconfigure/files/platform/redhat_rhel_kvm_vm/tuned/sap-hana/tuned.conf
create mode 100644 roles/sap_vm_preconfigure/files/platform/vmware_vsphere_vm/.gitkeep
create mode 100644 roles/sap_vm_preconfigure/handlers/main.yml
create mode 100644 roles/sap_vm_preconfigure/handlers/platform/cloud_aliyun_ecs_vm/.gitkeep
create mode 100644 roles/sap_vm_preconfigure/handlers/platform/cloud_aws_ec2_vs/.gitkeep
create mode 100644 roles/sap_vm_preconfigure/handlers/platform/cloud_gcp_ce_vm/.gitkeep
create mode 100644 roles/sap_vm_preconfigure/handlers/platform/cloud_ibmcloud_powervs/.gitkeep
create mode 100644 roles/sap_vm_preconfigure/handlers/platform/cloud_ibmcloud_vs/.gitkeep
create mode 100644 roles/sap_vm_preconfigure/handlers/platform/cloud_msazure_vm/.gitkeep
create mode 100644 roles/sap_vm_preconfigure/handlers/platform/ibmpower_lpar/.gitkeep
create mode 100644 roles/sap_vm_preconfigure/handlers/platform/redhat_ocp_virt_vm/.gitkeep
create mode 100644 roles/sap_vm_preconfigure/handlers/platform/redhat_rhel_kvm_vm/main.yml
create mode 100644 roles/sap_vm_preconfigure/handlers/platform/vmware_vsphere_vm/.gitkeep
create mode 100644 roles/sap_vm_preconfigure/meta/main.yml
create mode 100644 roles/sap_vm_preconfigure/tasks/detect_platform/main.yml
create mode 100644 roles/sap_vm_preconfigure/tasks/main.yml
create mode 100644 roles/sap_vm_preconfigure/tasks/platform/cloud_aliyun_ecs_vm/.gitkeep
create mode 100644 roles/sap_vm_preconfigure/tasks/platform/cloud_aws_ec2_vs/.gitkeep
create mode 100644 roles/sap_vm_preconfigure/tasks/platform/cloud_gcp_ce_vm/.gitkeep
create mode 100644 roles/sap_vm_preconfigure/tasks/platform/cloud_ibmcloud_powervs/.gitkeep
create mode 100644 roles/sap_vm_preconfigure/tasks/platform/cloud_ibmcloud_vs/.gitkeep
create mode 100644 roles/sap_vm_preconfigure/tasks/platform/cloud_msazure_vm/.gitkeep
create mode 100644 roles/sap_vm_preconfigure/tasks/platform/hyp_ibmpower_lpar/.gitkeep
create mode 100644 roles/sap_vm_preconfigure/tasks/platform/hyp_redhat_ocp_virt_vm/.gitkeep
create mode 100644 roles/sap_vm_preconfigure/tasks/platform/hyp_redhat_rhel_kvm_vm/assert-set-tuned-profile.yml
create mode 100644 roles/sap_vm_preconfigure/tasks/platform/hyp_redhat_rhel_kvm_vm/main.yml
create mode 100644 roles/sap_vm_preconfigure/tasks/platform/hyp_redhat_rhel_kvm_vm/set-tuned-profile.yml
create mode 100644 roles/sap_vm_preconfigure/tasks/platform/hyp_vmware_vsphere_vm/.gitkeep
create mode 100644 roles/sap_vm_preconfigure/vars/main.yml
create mode 100644 roles/sap_vm_preconfigure/vars/platform_defaults_redhat_rhel_kvm.yml
create mode 100644 roles/sap_vm_provision/PLATFORM_GUIDANCE.md
create mode 100644 roles/sap_vm_provision/README.md
create mode 100644 roles/sap_vm_provision/defaults/main.yml
create mode 100644 roles/sap_vm_provision/meta/main.yml
create mode 100644 roles/sap_vm_provision/meta/runtime.yml
create mode 100644 roles/sap_vm_provision/tasks/common/register_os.yml
create mode 100644 roles/sap_vm_provision/tasks/common/register_proxy.yml
create mode 100644 roles/sap_vm_provision/tasks/common/set_ansible_vars.yml
create mode 100644 roles/sap_vm_provision/tasks/common/set_ansible_vars_storage.yml
create mode 100644 roles/sap_vm_provision/tasks/common/set_etc_hosts.yml
create mode 100644 roles/sap_vm_provision/tasks/common/set_etc_hosts_ha.yml
create mode 100644 roles/sap_vm_provision/tasks/common/set_etc_hosts_scaleout.yml
create mode 100644 roles/sap_vm_provision/tasks/main.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/aws_ec2_vs/execute_main.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/aws_ec2_vs/execute_provision.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/aws_ec2_vs/execute_setup_ha.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/aws_ec2_vs/post_deployment_execute.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/execute_main.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/execute_provision.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/execute_setup_ha.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/post_deployment_execute.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_powervs/execute_main.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_powervs/execute_provision.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_powervs/post_deployment_execute.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/execute_main.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/execute_provision.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/execute_setup_ha.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/post_deployment_execute.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/ibmpowervm_vm/execute_main.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/ibmpowervm_vm/execute_provision.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/ibmpowervm_vm/post_deployment_execute.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/kubevirt_vm/execute_main.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/kubevirt_vm/execute_provision.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/kubevirt_vm/post_deployment_execute.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/execute_main.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/execute_provision.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/execute_setup_ha.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/post_deployment_execute.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/ovirt_vm/execute_main.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/ovirt_vm/execute_provision.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/ovirt_vm/post_deployment_execute.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/vmware_vm/execute_main.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/vmware_vm/execute_provision.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible/vmware_vm/post_deployment_execute.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible_to_terraform/aws_ec2_vs/execute_main.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible_to_terraform/aws_ec2_vs/tf_template/tf_template.tf
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible_to_terraform/aws_ec2_vs/tf_template/tf_template_input_vars.tf
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible_to_terraform/aws_ec2_vs/tf_template/tf_template_outputs.tf
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible_to_terraform/gcp_ce_vm/execute_main.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible_to_terraform/gcp_ce_vm/tf_template/tf_template.tf
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible_to_terraform/gcp_ce_vm/tf_template/tf_template_input_vars.tf
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible_to_terraform/gcp_ce_vm/tf_template/tf_template_outputs.tf
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_powervs/execute_main.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_powervs/tf_template/tf_template.tf
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_powervs/tf_template/tf_template_input_vars.tf
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_powervs/tf_template/tf_template_outputs.tf
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_vs/execute_main.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_vs/tf_template/tf_template.tf
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_vs/tf_template/tf_template_input_vars.tf
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_vs/tf_template/tf_template_outputs.tf
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmpowervm_vm/execute_main.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmpowervm_vm/tf_template/tf_template.tf
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmpowervm_vm/tf_template/tf_template_input_vars.tf
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmpowervm_vm/tf_template/tf_template_outputs.tf
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible_to_terraform/kubevirt_vm/execute_main.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible_to_terraform/msazure_vm/execute_main.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible_to_terraform/msazure_vm/tf_template/tf_template.tf
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible_to_terraform/msazure_vm/tf_template/tf_template_input_vars.tf
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible_to_terraform/msazure_vm/tf_template/tf_template_outputs.tf
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ovirt_vm/execute_main.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible_to_terraform/vmware_vm/execute_main.yml
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible_to_terraform/vmware_vm/tf_template/tf_template.tf
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible_to_terraform/vmware_vm/tf_template/tf_template_input_vars.tf
create mode 100644 roles/sap_vm_provision/tasks/platform_ansible_to_terraform/vmware_vm/tf_template/tf_template_outputs.tf
create mode 100644 roles/sap_vm_temp_vip/README.md
create mode 100644 roles/sap_vm_temp_vip/defaults/main.yml
create mode 100644 roles/sap_vm_temp_vip/meta/main.yml
create mode 100644 roles/sap_vm_temp_vip/meta/runtime.yml
create mode 100644 roles/sap_vm_temp_vip/tasks/identify_network_interface.yml
create mode 100644 roles/sap_vm_temp_vip/tasks/main.yml
create mode 100644 roles/sap_vm_temp_vip/tasks/set_temp_vip.yml
create mode 100644 roles/sap_vm_temp_vip/tasks/set_temp_vip_lb_listener.yml
create mode 100644 roles/sap_vm_verify/README.md
create mode 100644 roles/sap_vm_verify/defaults/main.yml
create mode 100644 roles/sap_vm_verify/meta/main.yml
create mode 100644 roles/sap_vm_verify/meta/runtime.yml
create mode 100644 roles/sap_vm_verify/tasks/check_network_interconnectivity.yml
create mode 100644 roles/sap_vm_verify/tasks/check_network_performance.yml
create mode 100644 roles/sap_vm_verify/tasks/check_storage_generic.yml
create mode 100644 roles/sap_vm_verify/tasks/check_storage_nfs.yml
create mode 100644 roles/sap_vm_verify/tasks/check_storage_performance.yml
create mode 100644 roles/sap_vm_verify/tasks/main.yml
create mode 100644 roles/sap_vm_verify/tasks/platform/aws_ec2_vs/.gitkeep
create mode 100644 roles/sap_vm_verify/tasks/platform/gcp_ce_vm/.gitkeep
create mode 100644 roles/sap_vm_verify/tasks/platform/ibmcloud_powervs/.gitkeep
create mode 100644 roles/sap_vm_verify/tasks/platform/ibmcloud_vs/.gitkeep
create mode 100644 roles/sap_vm_verify/tasks/platform/ibmpowervm_vm/.gitkeep
create mode 100644 roles/sap_vm_verify/tasks/platform/kubevirt_vm/.gitkeep
create mode 100644 roles/sap_vm_verify/tasks/platform/msazure_vm/.gitkeep
create mode 100644 roles/sap_vm_verify/tasks/platform/ovirt_vm/.gitkeep
create mode 100644 roles/sap_vm_verify/tasks/platform_checks_temp.yml
diff --git a/.ansible-lint b/.ansible-lint
new file mode 100644
index 0000000..9ce20e4
--- /dev/null
+++ b/.ansible-lint
@@ -0,0 +1,32 @@
+---
+# Collection wide lint-file
+# DO NOT CHANGE
+exclude_paths:
+ - .cache/
+ - .github/
+ #- docs/
+ - roles/sap_hypervisor_node_preconfigure
+ #- roles/sap_vm_provision
+ - roles/sap_vm_preconfigure
+
+enable_list:
+ - yaml
+
+skip_list:
+ # We don't want to enforce new Ansible versions for Galaxy:
+ - meta-runtime[unsupported-version]
+ # We do not want to use checks which are marked as experimental:
+ - experimental
+ # We use ignore_errors for all the assert tasks, which should be acceptable:
+ - ignore-errors
+ # We want to allow single digit version numbers in a role's meta/main.yml file:
+ - schema
+ # Allow templating inside name because it creates more detailed output:
+ - name[template]
+ - yaml[comments]
+ - yaml[line-length]
+ - no-changed-when
+ - no-tabs
+ - no-handler
+ - jinja[spacing]
+ - var-naming[no-jinja]
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..b2a76a8
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
+*.yml linguist-detectable
\ No newline at end of file
diff --git a/.github/workflows/ansible-lint-sap_hypervisor_node_preconfigure.yml b/.github/workflows/ansible-lint-sap_hypervisor_node_preconfigure.yml
new file mode 100644
index 0000000..bafd47d
--- /dev/null
+++ b/.github/workflows/ansible-lint-sap_hypervisor_node_preconfigure.yml
@@ -0,0 +1,43 @@
+---
+
+# Workflow for ansible-lint of a role
+
+name: ansible-lint of the role sap_hypervisor_node_preconfigure
+
+on:
+ push:
+ branches:
+ - main
+ - dev
+ paths:
+ - 'roles/sap_hypervisor_node_preconfigure/**'
+ pull_request:
+ branches:
+ - main
+ - dev
+ paths:
+ - 'roles/sap_hypervisor_node_preconfigure/**'
+
+jobs:
+ ansible-lint:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Check out the code
+ uses: actions/checkout@main
+
+ - name: Set up Python 3
+ uses: actions/setup-python@main
+ with:
+ python-version: '3.9'
+
+ - name: Install test dependencies
+ run: |
+ pip3 install ansible==7.5.0
+ pip3 install ansible-compat==3.0.2
+ pip3 install ansible-core==2.14.5
+ pip3 install ansible-lint==6.8.6
+
+ - name: Run ansible-lint
+ working-directory: /home/runner/work/community.sap_infrastructure/community.sap_infrastructure/roles/sap_hypervisor_node_preconfigure
+ run: ansible-lint
diff --git a/.github/workflows/ansible-lint-sap_vm_preconfigure.yml b/.github/workflows/ansible-lint-sap_vm_preconfigure.yml
new file mode 100644
index 0000000..1470dd0
--- /dev/null
+++ b/.github/workflows/ansible-lint-sap_vm_preconfigure.yml
@@ -0,0 +1,43 @@
+---
+
+# Workflow for ansible-lint of a role
+
+name: ansible-lint of the role sap_vm_preconfigure
+
+on:
+ push:
+ branches:
+ - main
+ - dev
+ paths:
+ - 'roles/sap_vm_preconfigure/**'
+ pull_request:
+ branches:
+ - main
+ - dev
+ paths:
+ - 'roles/sap_vm_preconfigure/**'
+
+jobs:
+ ansible-lint:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Check out the code
+ uses: actions/checkout@main
+
+ - name: Set up Python 3
+ uses: actions/setup-python@main
+ with:
+ python-version: '3.12'
+
+ - name: Install test dependencies
+ run: |
+ pip3 install ansible==9.1.0
+ pip3 install ansible-compat==4.1.10
+ pip3 install ansible-core==2.16.2
+ pip3 install ansible-lint==6.22.1
+
+ - name: Run ansible-lint
+ working-directory: /home/runner/work/community.sap_infrastructure/community.sap_infrastructure/roles/sap_vm_preconfigure
+ run: ansible-lint
diff --git a/.github/workflows/ansible-lint-sap_vm_provision.yml b/.github/workflows/ansible-lint-sap_vm_provision.yml
new file mode 100644
index 0000000..da65490
--- /dev/null
+++ b/.github/workflows/ansible-lint-sap_vm_provision.yml
@@ -0,0 +1,46 @@
+---
+
+# Workflow for ansible-lint of a role
+
+name: ansible-lint of the role sap_vm_provision
+
+on:
+ push:
+ branches:
+ - main
+ - dev
+ paths:
+ - 'roles/sap_vm_provision/**'
+ pull_request:
+ branches:
+ - main
+ - dev
+ paths:
+ - 'roles/sap_vm_provision/**'
+
+ workflow_dispatch:
+
+jobs:
+ ansible-lint:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Check out the code
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.12'
+
+ - name: Install test dependencies
+ run: |
+ pip3 install ansible==9.1.0
+ pip3 install ansible-compat==4.1.10
+ pip3 install ansible-core==2.16.2
+ pip3 install ansible-lint==6.22.1
+ pip3 install jmespath==1.0.1
+
+ - name: Run ansible-lint
+ working-directory: /home/runner/work/community.sap_infrastructure/community.sap_infrastructure/roles/sap_vm_provision
+ run: ansible-lint
diff --git a/.github/workflows/ansible-lint-sap_vm_temp_vip.yml b/.github/workflows/ansible-lint-sap_vm_temp_vip.yml
new file mode 100644
index 0000000..f4603f5
--- /dev/null
+++ b/.github/workflows/ansible-lint-sap_vm_temp_vip.yml
@@ -0,0 +1,45 @@
+---
+
+# Workflow for ansible-lint of a role
+
+name: ansible-lint of the role sap_vm_temp_vip
+
+on:
+ push:
+ branches:
+ - main
+ - dev
+ paths:
+ - 'roles/sap_vm_temp_vip/**'
+ pull_request:
+ branches:
+ - main
+ - dev
+ paths:
+ - 'roles/sap_vm_temp_vip/**'
+
+ workflow_dispatch:
+
+jobs:
+ ansible-lint:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Check out the code
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.12'
+
+ - name: Install test dependencies
+ run: |
+ pip3 install ansible==9.1.0
+ pip3 install ansible-compat==4.1.10
+ pip3 install ansible-core==2.16.2
+ pip3 install ansible-lint==6.22.1
+
+ - name: Run ansible-lint
+ working-directory: /home/runner/work/community.sap_infrastructure/community.sap_infrastructure/roles/sap_vm_temp_vip
+ run: ansible-lint
diff --git a/.github/workflows/ansible-lint-sap_vm_verify.yml b/.github/workflows/ansible-lint-sap_vm_verify.yml
new file mode 100644
index 0000000..2e279dc
--- /dev/null
+++ b/.github/workflows/ansible-lint-sap_vm_verify.yml
@@ -0,0 +1,46 @@
+---
+
+# Workflow for ansible-lint of a role
+
+name: ansible-lint of the role sap_vm_verify
+
+on:
+ push:
+ branches:
+ - main
+ - dev
+ paths:
+ - 'roles/sap_vm_verify/**'
+ pull_request:
+ branches:
+ - main
+ - dev
+ paths:
+ - 'roles/sap_vm_verify/**'
+
+ workflow_dispatch:
+
+jobs:
+ ansible-lint:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Check out the code
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.12'
+
+ - name: Install test dependencies
+ run: |
+ pip3 install ansible==9.1.0
+ pip3 install ansible-compat==4.1.10
+ pip3 install ansible-core==2.16.2
+ pip3 install ansible-lint==6.22.1
+ pip3 install jmespath==1.0.1
+
+ - name: Run ansible-lint
+ working-directory: /home/runner/work/community.sap_infrastructure/community.sap_infrastructure/roles/sap_vm_verify
+ run: ansible-lint
diff --git a/.github/workflows/ansible-lint.yml b/.github/workflows/ansible-lint.yml
new file mode 100644
index 0000000..2ac44be
--- /dev/null
+++ b/.github/workflows/ansible-lint.yml
@@ -0,0 +1,54 @@
+---
+
+# Workflow for ansible-lint of the collection
+
+name: ansible-lint of the collection
+
+on:
+ schedule:
+ - cron: '31 13 * * 1'
+
+ workflow_dispatch:
+
+jobs:
+ ansible-lint:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Check out the code
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.12'
+
+ - name: Install dependencies for Python
+ run: |
+ pip3 install ansible==9.1.0
+ pip3 install ansible-compat==4.1.10
+ pip3 install ansible-core==2.16.2
+ pip3 install ansible-lint==6.22.1
+ pip3 install jmespath==1.0.1
+
+ - name: Install dependencies for Ansible
+ run: ansible-galaxy collection install -r /home/runner/work/community.sap_infrastructure/community.sap_infrastructure/requirements.yml
+
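+      # Note: ansible-lint and ansible-test expect a collection to be located at
+      # ansible_collections/<namespace>/<name>; the following steps move the
+      # checkout to that path and restore it afterwards.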
+ - name: Move the collection to the correct location - Create new directory
+ run: mkdir -p /home/runner/.ansible/collections/ansible_collections/community
+
+ - name: Move the collection to the correct location - Move the collection
+ working-directory: /home/runner/
+ run: mv /home/runner/work/community.sap_infrastructure/community.sap_infrastructure /home/runner/.ansible/collections/ansible_collections/community
+
+ - name: Move the collection to the correct location - Rename the directory
+ working-directory: /home/runner/
+ run: mv /home/runner/.ansible/collections/ansible_collections/community/community.sap_infrastructure /home/runner/.ansible/collections/ansible_collections/community/sap_infrastructure
+
+ - name: Run ansible-lint
+ working-directory: /home/runner/.ansible/collections/ansible_collections/community/sap_infrastructure
+ run: ansible-lint
+
+ - name: Move the collection to its previous location
+ working-directory: /home/runner/
+ run: mv /home/runner/.ansible/collections/ansible_collections/community/sap_infrastructure /home/runner/work/community.sap_infrastructure/community.sap_infrastructure
diff --git a/.github/workflows/ansible-test-sanity.yml b/.github/workflows/ansible-test-sanity.yml
new file mode 100644
index 0000000..6415f25
--- /dev/null
+++ b/.github/workflows/ansible-test-sanity.yml
@@ -0,0 +1,53 @@
+---
+
+# Workflow for ansible-test sanity tests
+
+name: ansible-test sanity of the collection
+
+on:
+ schedule:
+ - cron: '31 12 * * 1'
+
+ workflow_dispatch:
+
+jobs:
+ sanity:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Check out the code
+ uses: actions/checkout@v4
+
+ - name: Set up Python 3
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.12'
+
+ - name: Install test dependencies
+ run: |
+ pip3 install ansible-lint==6.22.1
+ pip3 install ansible-compat==4.1.10
+
+# - name: Install collection dependencies
+# run: ansible-galaxy collection install community.general
+
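+      # Note: ansible-test must run from within ansible_collections/<namespace>/<name>,
+      # so the checkout is moved to that path and moved back after the tests.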
+ - name: Move the collection to the correct location - Create new directory
+ run: mkdir -p /home/runner/.ansible/collections/ansible_collections/community
+
+ - name: Move the collection to the correct location - Move the collection
+ working-directory: /home/runner/
+ run: mv /home/runner/work/community.sap_infrastructure/community.sap_infrastructure /home/runner/.ansible/collections/ansible_collections/community
+
+ - name: Move the collection to the correct location - Rename the directory
+ working-directory: /home/runner/
+ run: mv /home/runner/.ansible/collections/ansible_collections/community/community.sap_infrastructure /home/runner/.ansible/collections/ansible_collections/community/sap_infrastructure
+
+
+ - name: Run sanity tests
+ working-directory: /home/runner/.ansible/collections/ansible_collections/community/sap_infrastructure
+ run: ansible-test sanity
+
+
+ - name: Move the collection to its previous location
+ working-directory: /home/runner/
+ run: mv /home/runner/.ansible/collections/ansible_collections/community/sap_infrastructure /home/runner/work/community.sap_infrastructure/community.sap_infrastructure
diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml
new file mode 100644
index 0000000..eb4e074
--- /dev/null
+++ b/.github/workflows/codespell.yml
@@ -0,0 +1,21 @@
+name: CodeSpell
+
+on:
+ push:
+ branches:
+ - dev
+ pull_request:
+ branches:
+ - dev
+
+jobs:
+ codespell:
+ name: Check for spelling errors
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v4
+ - uses: codespell-project/actions-codespell@v2
+ with:
+ # lowercase only
+ ignore_words_list: aas,hsa,te,chage,addopt,sybsystem,uptodate
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..3469fb0
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,64 @@
+# .gitignore defines files to ignore and remain untracked in Git
+# Each line in a gitignore file specifies a pattern, e.g. a directory or file extension
+
+# Git should not track binary artifacts such as images, libraries, executables, archive files etc.
+# Until the team has mature processes, a Binary Artifacts Repository Manager is not in use.
+
+# Therefore some binary artifacts are tracked in this Git repository
+
+
+# Further .gitignore templates available at:
+# https://github.com/github/gitignore
+
+
+# macOS OS generated files
+.DS_Store
+._*
+.Spotlight-V100
+.Trashes
+
+# Windows OS generated files #
+ehthumbs.db
+Thumbs.db
+
+# Compressed Archives
+# git has built-in compression
+# *.7z
+# *.dmg
+# *.gz
+# *.iso
+# *.jar
+# *.rar
+# *.tar
+# *.zip
+
+# Binaries / Compiled source
+# *.com
+# *.class
+# *.dll
+# *.exe
+# *.o
+# *.so
+
+# Logs and databases
+# *.log
+# *.sqlite
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# VSCode
+.vscode
+
+# vi swap file
+*.swp
+
+# Local .terraform directories
+**/.terraform/*
+
+# .tfstate files
+*.tfstate
+*.tfstate.*
+.terraform.lock.hcl
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..cdf88a7
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,39 @@
+---
+exclude: '^$'
+fail_fast: true
+repos:
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.3.0
+ hooks:
+ - id: end-of-file-fixer
+ - id: check-merge-conflict
+ - id: check-symlinks
+ types: ["symlink"]
+ types_or: ["symlink"]
+ stages: [manual]
+ - id: check-executables-have-shebangs
+ name: "Executable text files have shebangs"
+ - id: trailing-whitespace
+ args: [--markdown-linebreak-ext=md]
+ files: .*\.(xml|yaml|yml|md|adoc)$
+ - id: check-xml
+ - id: check-yaml
+ args:
+ - "-m"
+ # - repo: https://github.com/adrienverge/yamllint.git
+ # rev: v1.26.3
+ # hooks:
+ # - id: yamllint
+ # exclude: >
+ # (?x)^(
+ # examples/playbooks/templates/.*|
+ # examples/other/some.j2.yaml
+ # )$
+ # files: \.(yaml|yml)$
+ # types: [file, yaml]
+ # entry: yamllint --strict
+ - repo: https://github.com/ansible-community/ansible-lint.git
+ rev: v6.22.1 # put latest release tag from https://github.com/ansible-community/ansible-lint/releases/
+ hooks:
+ - id: ansible-lint
+ files: \.(yaml|yml)$
diff --git a/.pylintrc b/.pylintrc
new file mode 100644
index 0000000..3ceceb4
--- /dev/null
+++ b/.pylintrc
@@ -0,0 +1,3 @@
+[MASTER]
+disable=
+ C0103, # invalid-name
diff --git a/.yamllint.yml b/.yamllint.yml
new file mode 100644
index 0000000..57ef427
--- /dev/null
+++ b/.yamllint.yml
@@ -0,0 +1,21 @@
+---
+# Based on ansible-lint config
+extends: default
+
+rules:
+ braces: {max-spaces-inside: 1, level: error}
+ brackets: {max-spaces-inside: 1, level: error}
+# colons: {max-spaces-after: -1, level: error}
+# commas: {max-spaces-after: -1, level: error}
+ comments: disable
+ comments-indentation: disable
+# document-start: disable
+# empty-lines: {max: 3, level: error}
+# hyphens: {level: error}
+# indentation: disable
+# key-duplicates: enable
+ line-length: disable
+# new-line-at-end-of-file: disable
+# new-lines: {type: unix}
+# trailing-spaces: disable
+ truthy: disable
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
new file mode 100644
index 0000000..e60c7a4
--- /dev/null
+++ b/CHANGELOG.rst
@@ -0,0 +1,14 @@
+==========================================
+community.sap_infrastructure Release Notes
+==========================================
+
+.. contents:: Topics
+
+v1.0.0
+======
+
+Release Summary
+---------------
+
+| Release Date: 2024-02-02
+| Initial Release on Galaxy
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..3fb6980
--- /dev/null
+++ b/README.md
@@ -0,0 +1,37 @@
+# community.sap_infrastructure Ansible Collection
+
+![Ansible Lint](https://github.com/sap-linuxlab/community.sap_infrastructure/actions/workflows/ansible-lint.yml/badge.svg?branch=main)
+
+This Ansible Collection executes various SAP Infrastructure-related tasks, creating the resources needed for hosts of SAP Systems.
+
+These Ansible Roles are often run first and combined with other Ansible Collections to provide end-to-end automation.
+
+Various Infrastructure Platforms (Cloud Hyperscalers and Hypervisors) are compatible and tested with this Ansible Collection.
+
+
+**Please read the [full documentation](./docs#readme) for how-to guidance, requirements, and all other details. Summary documentation is below:**
+
+
+## Contents
+
+Within this Ansible Collection, there are various Ansible Roles and no custom Ansible Modules.
+
+### Ansible Roles
+
+| Name | Summary |
+| :--- | :--- |
+| [sap_hypervisor_node_preconfigure](https://github.com/sap-linuxlab/community.sap_infrastructure/tree/main/roles/sap_hypervisor_node_preconfigure) `Beta` | Vendor-specific configuration preparation tasks for Hypervisor nodes hosting Virtual Machines running SAP Systems |
+| ~~[sap_vm_preconfigure](https://github.com/sap-linuxlab/community.sap_infrastructure/tree/main/roles/sap_vm_preconfigure)~~ `WIP` | ~~Vendor-specific configuration preparation tasks for Virtual Machines running SAP Systems~~ |
+| [sap_vm_provision](https://github.com/sap-linuxlab/community.sap_infrastructure/tree/main/roles/sap_vm_provision) | Provision Virtual Machines to different Infrastructure Platforms; with optional Ansible-to-Terraform to provision a minimal landing zone (partial compatibility via [Terraform Modules for SAP](https://github.com/sap-linuxlab/terraform.modules_for_sap)) |
+| [sap_vm_temp_vip](https://github.com/sap-linuxlab/community.sap_infrastructure/tree/main/roles/sap_vm_temp_vip) `Beta` | Temporary Virtual IP (VIP) assigned to the OS Network Interface prior to Linux Pacemaker ownership |
+| ~~[sap_vm_verify](https://github.com/sap-linuxlab/community.sap_infrastructure/tree/main/roles/sap_vm_verify)~~ `WIP` | ~~Verification of Virtual Machine state and readiness to perform SAP Software installation~~ |
+
+
+## License
+
+- [Apache 2.0](./LICENSE)
+
+
+## Contributors
+
+Contributors to the Ansible Roles within this Ansible Collection are shown in [/docs/contributors](./docs/CONTRIBUTORS.md).
diff --git a/docs/CONTRIBUTORS.md b/docs/CONTRIBUTORS.md
new file mode 100644
index 0000000..d4e76f0
--- /dev/null
+++ b/docs/CONTRIBUTORS.md
@@ -0,0 +1,17 @@
+# Development contributors
+
+- **IBM Lab for SAP Solutions**
+ - IBM Cloud
+ - **Sean Freeman** - Developer of Ansible Collection and sap_vm_provision Ansible Role
+- **Red Hat**
+ - Red Hat for SAP CoP
+ - **Janine Fuchs** - Developer of Ansible parallelisation and OVirt capability for sap_vm_provision Ansible Role
+ - **Nils Koenig** - Developer of sap_hypervisor_node_preconfigure and KubeVirt capability for sap_vm_provision Ansible Role
+
+# New contributors
+
+This project is consistently seeking new contributors, particularly those who are interested in the similarities and differences of various Infrastructure Platforms.
+
+All Pull Requests are welcome and will be reviewed for quality. Each review includes an analysis of "commonality" and "modularity" across Infrastructure Platforms, to avoid introducing features bespoke to an individual Infrastructure Platform that would create divergent deployments.
+
+The guiding principles and instructions for new contributors are available on the SAP LinuxLab governance page.
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 0000000..e1c0c9a
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,100 @@
+# Documentation of community.sap_infrastructure Ansible Collection
+
+## Introduction
+
+The `sap_infrastructure` Ansible Collection executes various SAP Infrastructure related tasks, creating resources needed for hosts of SAP Systems.
+
+These Ansible Roles are often run first and combined with other Ansible Collections to provide end-to-end automation.
+
+
+## Functionality
+
+This Ansible Collection provides a variety of tasks related to SAP Infrastructure (networks, storage, compute). The code structure and logic have been separated to support flexible execution of different steps across various Infrastructure Platforms and hosting options.
+
+At a high-level, the key functionality of this Ansible Collection includes:
+
+- Preconfigure Hypervisor nodes ready to host Virtual Machines running SAP Systems
+- Preconfigure Virtual Machines with specific tasks for the Infrastructure Platform
+- Provision Virtual Machines
+ - on target Infrastructure Platform, using Ansible or Ansible to Terraform (to perform minimal landing zone setup of an Infrastructure Platform)
+ - with High Availability resources if required for the Infrastructure Platform (e.g. Routing and Load Balancers on Cloud Hyperscalers)
+- Assignment of Temporary Virtual IP required for High Availability installations on selected Infrastructure Platforms
+
+
+Compatibility is available within the Ansible Collection for various Infrastructure Platforms:
+
+- Cloud Hyperscalers - AWS EC2 VS, GCP CE VM, IBM Cloud VS, IBM Power VS from IBM Cloud, MS Azure VM
+- Hypervisors - IBM PowerVM VM, OVirt VM, KubeVirt VM, VMware VM
+
+
+## Execution
+
+An Ansible Playbook is the file created and executed by an end-user, which imports from Ansible Collections to perform various activities on the target hosts.
+
+The Ansible Playbook can call either an Ansible Role, or directly call the individual Ansible Modules:
+
+- **Ansible Roles** (runs multiple Ansible Modules)
+- **Ansible Modules** (and adjoining Python/Bash Functions)
+
+It is strongly recommended to execute these Ansible Roles in accordance with best-practice Ansible usage, where an Ansible Playbook is executed from a control host and Ansible logs in to a target host to perform the activities.
+
+> If an Ansible Playbook is executed from the target host itself (similar to logging in and running a shell script), this is known as an Ansible Playbook 'localhost execution' and is not recommended as it has limitations on SAP Software installations (particularly installations across multiple hosts).
+
+At a high-level, complex executions with various interlinked activities are run in parallel or sequentially using the following execution structure:
+
+```
+Ansible Playbook
+-> source Ansible Collection
+-> execute Ansible Task
+---> run Ansible Role
+-----> run Ansible Module (e.g. built-in Ansible Module for Shell)
+```
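+
+For illustration, a minimal Ansible Playbook following this structure might look as follows (the host pattern and the chosen Ansible Role are placeholders to adapt for the target):
+
+```yaml
+---
+# Minimal sketch: include one Ansible Role from this Ansible Collection
+- name: Ansible Play for SAP Infrastructure tasks
+  hosts: all
+  become: true
+  tasks:
+    - name: Include Ansible Role from community.sap_infrastructure
+      ansible.builtin.include_role:
+        name: community.sap_infrastructure.sap_hypervisor_node_preconfigure
+```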
+
+### Execution examples
+
+There are various methods to execute the Ansible Collection, dependent on the use case.
+
+For more information, see [sample Ansible Playbooks in `/playbooks`](../playbooks/).
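+
+As one example (paths relative to a local clone of this repository), a sample Ansible Playbook can be executed with variables supplied from a file:
+
+```shell
+# Run a sample Ansible Playbook against localhost, with variables from a file
+ansible-playbook --connection=local -i "localhost," \
+./playbooks/sample-sap-hypervisor-redhat-ocp-virt-preconfigure.yml \
+-e @./playbooks/vars/sample-variables-sap-hypervisor-redhat-ocp-virt-preconfigure.yml
+```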
+
+
+## Requirements and Dependencies
+
+### Execution/Controller host - Operating System requirements
+
+Execution of Ansible Playbooks using this Ansible Collection has been tested with:
+- Python 3.9.7 and above (i.e. CPython distribution)
+- Ansible Core 2.12.0 and above _(included with optional installation of Ansible Community Edition 5.0 and above)_
+- OS: macOS with Homebrew, RHEL, SLES, and containers in Task Runners (e.g. Azure DevOps)
+
+#### Ansible Core version
+
+This Ansible Collection was designed for maximum backwards compatibility, with full compatibility starting from Ansible Core 2.12.0 and above.
+
+**Note 1:** Ansible 2.9 was the last release before the Ansible project was split into Ansible Core and Ansible Community Edition, and predates the full introduction of Ansible Collections functionality. This Ansible Collection may execute when Ansible 2.9 is used, but this is not recommended and errors should be expected (and will not be resolved).
+
+**Note 2:** Ansible Core versions 2.14.12, 2.15.8, and 2.16.1 addressed `CVE-2023-5764` (templating inside the `that` statement of `assert` Ansible Tasks). Ansible Core versions containing this security fix will only work with `v1.3.4` and later of this Ansible Collection; otherwise an error similar to the following will occur:
+
+```yaml
+fatal: [host01]: FAILED! =>
+ msg: 'The conditional check ''13 <= 128'' failed. The error was: Conditional is marked as unsafe, and cannot be evaluated.'
+```
+
+
+## Testing
+
+Various Infrastructure Platforms and SAP Software solutions have been extensively tested.
+
+Prior to each release, basic scenarios are executed to confirm functionality is working as expected; including SAP S/4HANA installation.
+
+Important note: it is not possible for the project maintainers to test every Infrastructure Platform setup and all SAP Software for each OS. If an error is identified, please raise a [GitHub Issue](/../../issues/).
+
+
+### Ansible Roles Lint Status
+
+| Role Name | Ansible Lint Status |
+| :--- | :--- |
+| [sap_hypervisor_node_preconfigure](https://github.com/sap-linuxlab/community.sap_infrastructure/tree/main/roles/sap_hypervisor_node_preconfigure) | [![Ansible Lint for sap_hypervisor_node_preconfigure](https://github.com/sap-linuxlab/community.sap_infrastructure/actions/workflows/ansible-lint-sap_hypervisor_node_preconfigure.yml/badge.svg)](https://github.com/sap-linuxlab/community.sap_infrastructure/actions/workflows/ansible-lint-sap_hypervisor_node_preconfigure.yml) |
+| [sap_vm_preconfigure](https://github.com/sap-linuxlab/community.sap_infrastructure/tree/main/roles/sap_vm_preconfigure) | [![Ansible Lint for sap_vm_preconfigure](https://github.com/sap-linuxlab/community.sap_infrastructure/actions/workflows/ansible-lint-sap_vm_preconfigure.yml/badge.svg)](https://github.com/sap-linuxlab/community.sap_infrastructure/actions/workflows/ansible-lint-sap_vm_preconfigure.yml) |
+| [sap_vm_provision](https://github.com/sap-linuxlab/community.sap_infrastructure/tree/main/roles/sap_vm_provision) | [![Ansible Lint for sap_vm_provision](https://github.com/sap-linuxlab/community.sap_infrastructure/actions/workflows/ansible-lint-sap_vm_provision.yml/badge.svg)](https://github.com/sap-linuxlab/community.sap_infrastructure/actions/workflows/ansible-lint-sap_vm_provision.yml) |
+| [sap_vm_temp_vip](https://github.com/sap-linuxlab/community.sap_infrastructure/tree/main/roles/sap_vm_temp_vip) | [![Ansible Lint for sap_vm_temp_vip](https://github.com/sap-linuxlab/community.sap_infrastructure/actions/workflows/ansible-lint-sap_vm_temp_vip.yml/badge.svg)](https://github.com/sap-linuxlab/community.sap_infrastructure/actions/workflows/ansible-lint-sap_vm_temp_vip.yml) |
+| [sap_vm_verify](https://github.com/sap-linuxlab/community.sap_infrastructure/tree/main/roles/sap_vm_verify) | [![Ansible Lint for sap_vm_verify](https://github.com/sap-linuxlab/community.sap_infrastructure/actions/workflows/ansible-lint-sap_vm_verify.yml/badge.svg)](https://github.com/sap-linuxlab/community.sap_infrastructure/actions/workflows/ansible-lint-sap_vm_verify.yml) |
diff --git a/galaxy.yml b/galaxy.yml
new file mode 100644
index 0000000..7eb8551
--- /dev/null
+++ b/galaxy.yml
@@ -0,0 +1,66 @@
+---
+
+### REQUIRED
+# The namespace of the collection. This can be a company/brand/organization or product namespace under which all
+# content lives. May only contain alphanumeric lowercase characters and underscores. Namespaces cannot start with
+# underscores or numbers and cannot contain consecutive underscores
+namespace: community
+
+# The name of the collection. Has the same character restrictions as 'namespace'
+name: sap_infrastructure
+
+# The version of the collection. Must be compatible with semantic versioning
+version: 1.0.0
+
+# The path to the Markdown (.md) readme file. This path is relative to the root of the collection
+readme: README.md
+
+# A list of the collection's content authors. Can be just the name or in the format 'Full Name (url)'
+authors:
+ - Sean Freeman
+ - Janine Fuchs
+ - Nils Koenig
+
+### OPTIONAL but strongly recommended
+# A short summary description of the collection
+description: Collection of Ansible Roles for SAP Infrastructure related tasks
+
+# Either a single license or a list of licenses for content inside of a collection. Ansible Galaxy currently only
+# accepts L(SPDX,https://spdx.org/licenses/) licenses. This key is mutually exclusive with 'license_file'
+license:
+ - Apache-2.0
+
+# The path to the license file for the collection. This path is relative to the root of the collection. This key is
+# mutually exclusive with 'license'
+license_file: ''
+
+# A list of tags you want to associate with the collection for indexing/searching. A tag name has the same character
+# requirements as 'namespace' and 'name'
+tags:
+ - database
+ - application
+ - sap
+
+# Collections that this collection requires to be installed for it to be usable. The key of the dict is the
+# collection label 'namespace.name'. The value is a version range
+# L(specifiers,https://python-semanticversion.readthedocs.io/en/latest/#requirement-specification). Multiple version
+# range specifiers can be set and are separated by ','
+dependencies: {}
+
+# The URL of the originating SCM repository
+repository: https://github.com/sap-linuxlab/community.sap_infrastructure
+
+# The URL to any online docs
+documentation: https://github.com/sap-linuxlab/sap-linuxlab.github.io/blob/master/README.md
+
+# The URL to the homepage of the collection/project
+homepage: https://sap-linuxlab.github.io
+
+# The URL to the collection issue tracker
+issues: https://github.com/sap-linuxlab/community.sap_infrastructure/issues
+
+# A list of file glob-like patterns used to filter any files or directories that should not be included in the build
+# artifact. A pattern is matched from the relative path of the file or directory of the collection directory. This
+# uses 'fnmatch' to match the files or directories. Some directories and files like 'galaxy.yml', '*.pyc', '*.retry',
+# and '.git' are always filtered
+build_ignore: ['tests', 'internal-*']
diff --git a/meta/runtime.yml b/meta/runtime.yml
new file mode 100644
index 0000000..c2ea658
--- /dev/null
+++ b/meta/runtime.yml
@@ -0,0 +1,2 @@
+---
+requires_ansible: '>=2.12.0'
diff --git a/playbooks/sample-sap-hypervisor-redhat-ocp-virt-preconfigure.yml b/playbooks/sample-sap-hypervisor-redhat-ocp-virt-preconfigure.yml
new file mode 100644
index 0000000..f60d69b
--- /dev/null
+++ b/playbooks/sample-sap-hypervisor-redhat-ocp-virt-preconfigure.yml
@@ -0,0 +1,19 @@
+---
+
+- name: Ansible Play to run sap_hypervisor_node_preconfigure Ansible Role
+ hosts: all
+ gather_facts: true
+ serial: 1
+
+ vars:
+ sap_hypervisor_node_platform: redhat_ocp_virt
+    sap_hypervisor_node_kubeconfig: "{{ lookup('ansible.builtin.env', 'KUBECONFIG') }}"
+
+ environment:
+ KUBECONFIG: "{{ sap_hypervisor_node_kubeconfig }}"
+ K8S_AUTH_KUBECONFIG: "{{ sap_hypervisor_node_kubeconfig }}"
+
+ tasks:
+ - name: Include sap_hypervisor_node_preconfigure Ansible Role
+ ansible.builtin.include_role:
+ name: sap_hypervisor_node_preconfigure
diff --git a/playbooks/vars/sample-variables-sap-hypervisor-redhat-ocp-virt-preconfigure.yml b/playbooks/vars/sample-variables-sap-hypervisor-redhat-ocp-virt-preconfigure.yml
new file mode 100644
index 0000000..51b8445
--- /dev/null
+++ b/playbooks/vars/sample-variables-sap-hypervisor-redhat-ocp-virt-preconfigure.yml
@@ -0,0 +1,127 @@
+---
+
+sap_hypervisor_node_preconfigure_install_operators: true
+sap_hypervisor_node_preconfigure_setup_worker_nodes: true
+
+# Install the trident NFS storage provider. If yes, expects configuration details under
+# sap_hypervisor_node_preconfigure_cluster_config.trident, see example config.
+sap_hypervisor_node_preconfigure_install_trident: false # true, false
+# URL of the trident installer package to use
+sap_hypervisor_node_preconfigure_install_trident_url: https://github.com/NetApp/trident/releases/download/v23.01.0/trident-installer-23.01.0.tar.gz
+
+# should SRIOV be enabled for unsupported NICs
+sap_hypervisor_node_preconfigure_sriov_enable_unsupported_nics: true # true, false
+
+# Amount of memory [GB] to be reserved for the hypervisor on hosts >= 512GB
+sap_hypervisor_node_preconfigure_hypervisor_reserved_ram_host_ge_512: 64 #GB
+# Amount of memory [GB] to be reserved for the hypervisor on hosts < 512GB
+sap_hypervisor_node_preconfigure_hypervisor_reserved_ram_host_lt_512: 32 #GB
+
+# Should the check for the minimal amount of memory be ignored? Minimal amount is 96 GB
+# If ignored, the amount of $hostmemory - $reserved is allocated with a lower bound of 0 in case $reserved > $hostmemory
+sap_hypervisor_node_preconfigure_ignore_minimal_memory_check: true # true, false
+
+# Define if the host path provisioner should be installed in order to use a local disk as storage device.
+# Requires the following variable to be set to the storage device to be used, e.g.:
+# sap_hypervisor_node_preconfigure_cluster_config.worker_localstorage_device: /dev/sdb
+sap_hypervisor_node_preconfigure_install_hpp: true # true, false
+
+
+# Example configuration for redhat_ocp_virt
+sap_hypervisor_node_preconfigure_cluster_config:
+
+ # URL under which the OCP cluster is reachable
+ cluster_url: ocpcluster.domain.org
+
+ # namespace under which the VMs are created, note this has to be
+ # openshift-sriov-network-operator in case of using SRIOV network
+ # devices
+ vm_namespace: sap
+
+ # Optional, configuration for trident driver for Netapp NFS filer
+ trident:
+ management: management.domain.org
+ data: datalif.netapp.domain.org
+ svm: sap_svm
+ backend: nas_backend
+ aggregate: aggregate_Name
+ username: admin
+ password: xxxxx
+ storage_driver: ontap-nas
+ storage_prefix: ocpv_sap_
+
+ # CPU cores which will be reserved for kubernetes
+ worker_kubernetes_reserved_cpus: "0,1"
+
+ # Storage device used for host path provisioner as local storage.
+ worker_localstorage_device: /dev/vdb
+
+ # detailed configuration for every worker that should be configured
+ workers:
+
+ - name: worker-0 # name must match the node name
+ networks: # Example network config
+
+ - name: sapbridge # using a bridge
+ description: SAP bridge
+ state: up
+ type: linux-bridge
+ ipv4:
+ enabled: false
+ auto-gateway: false
+ auto-dns: false
+ bridge:
+ options:
+ stp:
+ enabled: false
+ port:
+ - name: ens1f0 # network IF name
+
+ - name: storage # an SRIOV device
+ interface: ens2f0 # network IF name
+ type: sriov
+
+ - name: storagebridge # using a bridge
+ bridge: # another bridge
+ options:
+ stp:
+ enabled: false
+ port:
+ - name: ens2f0 # network IF name
+ description: storage
+ mtu: 9000
+ ipv4:
+ address:
+ - ip: 192.168.1.51 # IP config
+ prefix-length: 24
+ auto-dns: false
+ auto-gateway: false
+ enabled: true
+ state: up
+ type: linux-bridge
+
+ - name: multi # another SRIOV device
+ interface: ens2f1 # network IF name
+ type: sriov
+
+ - name: worker-1 # second worker configuration
+ networks: # Example network config
+
+ - name: sapbridge # using a bridge
+ description: SAP bridge
+ state: up
+ type: linux-bridge
+ ipv4:
+ enabled: false
+ auto-gateway: false
+ auto-dns: false
+ bridge:
+ options:
+ stp:
+ enabled: false
+ port:
+ - name: ens1f0 # network IF name
+
+ - name: storage # an SRIOV device
+ interface: ens2f0 # network IF name
+ type: sriov
diff --git a/requirements-dev.txt b/requirements-dev.txt
new file mode 100644
index 0000000..1fd3971
--- /dev/null
+++ b/requirements-dev.txt
@@ -0,0 +1 @@
+ansible-test
diff --git a/requirements.yml b/requirements.yml
new file mode 100644
index 0000000..abeb348
--- /dev/null
+++ b/requirements.yml
@@ -0,0 +1,37 @@
+---
+
+collections:
+ - name: cloud.terraform
+ type: galaxy
+ version: 1.1.0
+ - name: amazon.aws
+ type: galaxy
+ version: 5.4.0
+ - name: community.aws
+ type: galaxy
+ version: 5.4.0
+ - name: azure.azcollection
+ type: galaxy
+ version: 1.15.0
+ - name: google.cloud
+ type: galaxy
+ version: 1.1.3
+  # Replace with ibm.cloud in future (legacy Ansible Collection uses hidden on-the-fly Terraform files in /var/tmp/ansible/ibmcloud)
+ - name: ibm.cloudcollection
+ type: galaxy
+ version: 1.51.0
+ - name: ovirt.ovirt
+ type: galaxy
+ version: 3.1.2
+ - name: openstack.cloud
+ type: galaxy
+ version: 2.1.0
+ - name: kubevirt.core
+ type: galaxy
+ version: 1.1.0
+ - name: vmware.vmware_rest
+ type: galaxy
+ version: 3.0.0
+ - name: cloud.common
+ type: galaxy
+ version: 3.0.0
diff --git a/roles/sap_hypervisor_node_preconfigure/.ansible-lint b/roles/sap_hypervisor_node_preconfigure/.ansible-lint
new file mode 100644
index 0000000..8a5df4d
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/.ansible-lint
@@ -0,0 +1,16 @@
+---
+exclude_paths:
+ - tests/
+enable_list:
+ - yaml
+skip_list:
+ # We don't want to enforce new Ansible versions for Galaxy:
+ - meta-runtime[unsupported-version]
+ # We do not want to use checks which are marked as experimental:
+ - experimental
+ # We use ignore_errors for all the assert tasks, which should be acceptable:
+ - ignore-errors
+ # We want to allow single digit version numbers in a role's meta/main.yml file:
+ - schema
+ # Allow templating inside name because it creates more detailed output:
+ - name[template]
diff --git a/roles/sap_hypervisor_node_preconfigure/.yamllint.yml b/roles/sap_hypervisor_node_preconfigure/.yamllint.yml
new file mode 100644
index 0000000..57ef427
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/.yamllint.yml
@@ -0,0 +1,21 @@
+---
+# Based on ansible-lint config
+extends: default
+
+rules:
+ braces: {max-spaces-inside: 1, level: error}
+ brackets: {max-spaces-inside: 1, level: error}
+# colons: {max-spaces-after: -1, level: error}
+# commas: {max-spaces-after: -1, level: error}
+ comments: disable
+ comments-indentation: disable
+# document-start: disable
+# empty-lines: {max: 3, level: error}
+# hyphens: {level: error}
+# indentation: disable
+# key-duplicates: enable
+ line-length: disable
+# new-line-at-end-of-file: disable
+# new-lines: {type: unix}
+# trailing-spaces: disable
+ truthy: disable
diff --git a/roles/sap_hypervisor_node_preconfigure/README.md b/roles/sap_hypervisor_node_preconfigure/README.md
new file mode 100644
index 0000000..2d335d9
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/README.md
@@ -0,0 +1,135 @@
+`Beta`
+
+# sap_hypervisor_node_preconfigure
+
+Ansible Role for configuration of Hypervisor Nodes and Control Plane for hosting Virtual Machines with SAP Systems.
+
+This Ansible Role will configure the following hypervisors in order to run SAP workloads:
+- Red Hat OpenShift Virtualization (OCPV), i.e. KubeVirt
+- Red Hat Virtualization (RHV), i.e. OVirt KVM
+
+
+## Functionality
+
+The hypervisor nodes for Virtual Machines hosting SAP Software are amended by the Ansible Role according to SAP Notes and best practices defined jointly by the Hypervisor vendor and SAP. The majority of these alterations improve the performance of SAP Software within the Virtual Machine and the Hypervisor.
+
+
+## Scope
+
+All hosts for SAP Software on a target Hypervisor.
+
+
+## Requirements
+
+### Target hypervisor nodes
+
+**Hypervisor Versions:**
+- Red Hat OpenShift Virtualization (OCPV) version XYZ+
+- Red Hat Virtualization (RHV) version 4.4+ (Extended Support until 1H-2026)
+ - Contains 'Red Hat Virtualization Manager (RHV-M)' and the 'Red Hat Virtualization Host (RHV-H)' hypervisor nodes that this Ansible Role preconfigures
+ - _Formerly called Red Hat Enterprise Virtualization (RHEV) prior to version 4.4_
+ - _Not to be confused with standalone RHEL KVM (RHEL-KVM) hypervisor nodes, which this Ansible Role is not compatible with_
+
+**Prerequisites:**
+- Hypervisor Administrator credentials
+
+**Platform-specific - Red Hat OpenShift Virtualization (OCPV):**
+- Red Hat OpenShift cluster:
+ - Preferable without any previous customization
+ - Worker nodes with minimum 96GB of Memory (DRAM)
+ - Worker nodes with Intel CPU Instruction Sets: `TSX` ([SAP Note 2737837](https://me.sap.com/notes/2737837/E))
+ - Storage as Local Storage (e.g. LVM) using host path provisioner, NFS, OpenShift Data Foundation, or other via storage orchestrators (such as Trident for NetApp)
+
+### Execution/Controller host
+
+**Dependencies:**
+- OS Packages
+ - Python 3.9.7+ (i.e. CPython distribution)
+ - Red Hat OpenShift CLI Client (`oc` binary)
+- Python Packages:
+ - `kubernetes` 29.0.0+
+- Ansible
+ - Ansible Core 2.12.0+
+ - Ansible Collections:
+ - `kubernetes.core` 3.0.0+
+
+**During execution:**
+- For Red Hat OpenShift Virtualization (OCPV), use Environment Variable `KUBECONFIG`
+
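+For example, the Environment Variable can be set on the Execution/Controller host before running the Ansible Playbook (the path shown is a placeholder):
+
+```shell
+# Placeholder path to the kubeconfig of the target Red Hat OpenShift cluster
+export KUBECONFIG=~/.kube/config
+```
+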
+
+## Execution
+
+### Sample execution
+
+For further information, see the [sample Ansible Playbooks in `/playbooks`](../playbooks/). For example:
+
+```shell
+ansible-playbook --connection=local -i "localhost," \
+./playbooks/sample-sap-hypervisor-redhat-ocp-virt-preconfigure.yml \
+-e @./playbooks/vars/sample-variables-sap-hypervisor-redhat-ocp-virt-preconfigure.yml
+```
+
+### Suggested execution sequence
+
+There are no Ansible Roles suggested to be executed prior to this Ansible Role.
+
+### Summary of execution flow
+
+- Execute with specified Hypervisor platform using variable `sap_hypervisor_node_platform`
+- Import default variables from `/vars` for specified Hypervisor platform
+- Re-configure specified Hypervisor platform
+- Append performance configuration for specified Hypervisor platform
+
+### Tags to control execution
+
+There are no tags used to control the execution of this Ansible Role.
+
+
+## License
+
+Apache 2.0
+
+
+## Authors
+
+Nils Koenig
+
+---
+
+## Ansible Role Input Variables
+
+Please first check the [/defaults parameters file](./defaults/main.yml) and the platform-specific parameters (e.g. [/vars/platform_defaults_redhat_ocp_virt](./vars/platform_defaults_redhat_ocp_virt.yml)).
+
+Below is the list of input parameters for this Ansible Role.
+
+
+`sap_hypervisor_node_preconfigure_reserved_ram (default: 100)` Memory [GB] to reserve for the hypervisor host. Depending on the use case, this should be at least 50-100GB.
+
+`sap_hypervisor_node_preconfigure_reserve_hugepages (default: static)` Hugepage allocation method: {static|runtime}.
+- `static`: allocated via the kernel command line, which is slow but safe
+- `runtime`: allocated with `hugeadm`, which is faster but in some cases cannot ensure that all hugepages are allocated
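+
+For illustration, static allocation corresponds to kernel command line arguments such as the following (mirroring the MachineConfig template used by the `redhat_ocp_virt` platform tasks, where the hugepage count is derived from host memory minus the reserved amount):
+
+```ini
+default_hugepagesz=1GB hugepagesz=1GB hugepages=<host memory GiB minus reserved GiB>
+```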
+
+`sap_hypervisor_node_preconfigure_kvm_nx_huge_pages (default: "auto")` Setting for the huge page shattering kvm.nx_huge_pages: {"auto"|"on"|"off"}. Note the importance of the quotes, otherwise off will be mapped to false. See https://www.kernel.org/doc/html/latest/admin-guide/kernel-parameters.html for additional information:
+
+```ini
+ kvm.nx_huge_pages=
+ [KVM] Controls the software workaround for the
+ X86_BUG_ITLB_MULTIHIT bug.
+ force : Always deploy workaround.
+ off : Never deploy workaround.
+ auto : Deploy workaround based on the presence of
+ X86_BUG_ITLB_MULTIHIT.
+
+ Default is 'auto'.
+
+ If the software workaround is enabled for the host,
+ guests do need not to enable it for nested guests.
+```
+
+`sap_hypervisor_node_preconfigure_tsx (default: "off")` Intel Transactional Synchronization Extensions (TSX): {"on"|"off"}. Note the importance of the quotes, otherwise off will be mapped to false.
+
+`sap_hypervisor_node_preconfigure_assert (default: false)` In assert mode, the parameters on the system are checked to confirm whether they conform to what this role would set.
+
+`sap_hypervisor_node_preconfigure_ignore_failed_assertion (default: no)` If `yes`, do not fail when an assertion check is invalid.
+
+`sap_hypervisor_node_preconfigure_run_grub2_mkconfig (default: yes)` Update the grub2 config.
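+
+As a hedged example, these parameters could be supplied as Ansible Play `vars` (the values shown are illustrative placeholders, not recommendations):
+
+```yaml
+# Illustrative values only; size the reservation for the target host
+sap_hypervisor_node_preconfigure_reserved_ram: 100
+sap_hypervisor_node_preconfigure_reserve_hugepages: static
+sap_hypervisor_node_preconfigure_kvm_nx_huge_pages: "auto"
+sap_hypervisor_node_preconfigure_tsx: "off"
+sap_hypervisor_node_preconfigure_assert: false
+sap_hypervisor_node_preconfigure_ignore_failed_assertion: no
+sap_hypervisor_node_preconfigure_run_grub2_mkconfig: yes
+```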
diff --git a/roles/sap_hypervisor_node_preconfigure/defaults/main.yml b/roles/sap_hypervisor_node_preconfigure/defaults/main.yml
new file mode 100644
index 0000000..d072014
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/defaults/main.yml
@@ -0,0 +1,123 @@
+---
+
+# ibmpower_phyp, redhat_ocp_virt, redhat_rhel_kvm, vmware_vsphere
+sap_hypervisor_node_platform:
+
+
+# Example configuration for redhat_ocp_virt
+sap_hypervisor_node_preconfigure_cluster_config:
+
+ # URL under which the OCP cluster is reachable
+ cluster_url: ocpcluster.domain.org
+
+ # namespace under which the VMs are created, note this has to be
+ # openshift-sriov-network-operator in case of using SRIOV network
+ # devices
+ vm_namespace: sap
+
+ # Optional, configuration for trident driver for Netapp NFS filer
+ trident:
+ management: management.domain.org
+ data: datalif.netapp.domain.org
+ svm: sap_svm
+ backend: nas_backend
+ aggregate: aggregate_Name
+ username: admin
+ password: xxxxx
+ storage_driver: ontap-nas
+ storage_prefix: ocpv_sap_
+
+ # CPU cores reserved for kubernetes on worker node
+ worker_kubernetes_reserved_cpus: "0,1"
+
+ # Storage device which should be used if host path provisioner is used
+ worker_localstorage_device: /dev/vdb
+
+ # detailed configuration for every worker that should be configured
+ workers:
+
+ # - name: worker-0 # name must match the node name
+ # networks: # Example network config
+
+ # - name: sapbridge # using a bridge
+ # description: SAP bridge
+ # state: up
+ # type: linux-bridge
+ # ipv4:
+ # enabled: false
+ # auto-gateway: false
+ # auto-dns: false
+ # bridge:
+ # options:
+ # stp:
+ # enabled: false
+ # port:
+ # - name: ens1f0 # network IF name
+
+ # - name: storage # an SRIOV device
+ # interface: ens2f0 # network IF name
+ # type: sriov
+
+ # - name: storagebridge # using a bridge
+ # bridge: # another bridge
+ # options:
+ # stp:
+ # enabled: false
+ # port:
+ # - name: ens2f0 # network IF name
+ # description: storage
+ # mtu: 9000
+ # ipv4:
+ # address:
+ # - ip: 192.168.1.51 # IP config
+ # prefix-length: 24
+ # auto-dns: false
+ # auto-gateway: false
+ # enabled: true
+ # state: up
+ # type: linux-bridge
+
+ # - name: multi # another SRIOV device
+ # interface: ens2f1 # network IF name
+ # type: sriov
+
+ # - name: worker-1 # second worker configuration
+ # networks: # Example network config
+
+ # - name: sapbridge # using a bridge
+ # description: SAP bridge
+ # state: up
+ # type: linux-bridge
+ # ipv4:
+ # enabled: false
+ # auto-gateway: false
+ # auto-dns: false
+ # bridge:
+ # options:
+ # stp:
+ # enabled: false
+ # port:
+ # - name: ens1f0 # network IF name
+
+ # - name: storagebridge # using a bridge
+ # bridge: # another bridge
+ # options:
+ # stp:
+ # enabled: false
+ # port:
+ # - name: ens2f0 # network IF name
+ # description: storage
+ # mtu: 9000
+ # ipv4:
+ # address:
+ # - ip: 192.168.1.51 # IP config
+ # prefix-length: 24
+ # auto-dns: false
+ # auto-gateway: false
+ # enabled: true
+ # state: up
+ # type: linux-bridge
+
+ # - name: storage # an SRIOV device
+ # interface: ens2f0 # network IF name
+ # type: sriov
diff --git a/roles/sap_hypervisor_node_preconfigure/files/platform/ibmpower_phyp/.gitkeep b/roles/sap_hypervisor_node_preconfigure/files/platform/ibmpower_phyp/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_hypervisor_node_preconfigure/files/platform/redhat_ocp_virt/.gitkeep b/roles/sap_hypervisor_node_preconfigure/files/platform/redhat_ocp_virt/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_hypervisor_node_preconfigure/files/platform/vmware_vsphere/.gitkeep b/roles/sap_hypervisor_node_preconfigure/files/platform/vmware_vsphere/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_hypervisor_node_preconfigure/handlers/main.yml b/roles/sap_hypervisor_node_preconfigure/handlers/main.yml
new file mode 100644
index 0000000..d943640
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/handlers/main.yml
@@ -0,0 +1,3 @@
+---
+- name: Hypervisor node preconfigure - Include Handler Tasks for {{ sap_hypervisor_node_platform }}
+ ansible.builtin.import_tasks: "platform/{{ sap_hypervisor_node_platform }}/main.yml"
diff --git a/roles/sap_hypervisor_node_preconfigure/handlers/platform/ibmpower_phyp/.gitkeep b/roles/sap_hypervisor_node_preconfigure/handlers/platform/ibmpower_phyp/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_hypervisor_node_preconfigure/handlers/platform/redhat_ocp_virt/.gitkeep b/roles/sap_hypervisor_node_preconfigure/handlers/platform/redhat_ocp_virt/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_hypervisor_node_preconfigure/handlers/platform/redhat_rhel_kvm/main.yml b/roles/sap_hypervisor_node_preconfigure/handlers/platform/redhat_rhel_kvm/main.yml
new file mode 100644
index 0000000..f773bdd
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/handlers/platform/redhat_rhel_kvm/main.yml
@@ -0,0 +1,93 @@
+---
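+# Handler chain: the grub2 regeneration handler determines BIOS vs UEFI boot,
+# runs grub2-mkconfig accordingly, and notifies the reboot handler, which
+# reboots, fails, or only warns depending on
+# sap_hypervisor_node_preconfigure_reboot_ok and
+# sap_hypervisor_node_preconfigure_fail_if_reboot_required.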
+- name: "Check if server is booted in BIOS or UEFI mode"
+ ansible.builtin.stat:
+ path: /sys/firmware/efi
+ get_checksum: no
+ register: __sap_hypervisor_node_preconfigure_register_stat_sys_firmware_efi
+ listen: __sap_hypervisor_node_preconfigure_regenerate_grub2_conf_handler
+ when:
+ - sap_hypervisor_node_preconfigure_run_grub2_mkconfig|d(true)
+
+- name: Debug BIOS or UEFI
+ ansible.builtin.debug:
+ var: __sap_hypervisor_node_preconfigure_register_stat_sys_firmware_efi.stat.exists
+ listen: __sap_hypervisor_node_preconfigure_regenerate_grub2_conf_handler
+ when:
+ - sap_hypervisor_node_preconfigure_run_grub2_mkconfig|d(true)
+
+- name: "Run grub-mkconfig (BIOS mode)"
+ ansible.builtin.command: grub2-mkconfig -o /boot/grub2/grub.cfg
+ register: __sap_hypervisor_node_preconfigure_register_grub2_mkconfig_bios_mode
+ listen: __sap_hypervisor_node_preconfigure_regenerate_grub2_conf_handler
+ notify: __sap_hypervisor_node_preconfigure_reboot_handler
+ when:
+ - not __sap_hypervisor_node_preconfigure_register_stat_sys_firmware_efi.stat.exists
+ - sap_hypervisor_node_preconfigure_run_grub2_mkconfig|d(true)
+ become: true
+ become_user: root
+
+- name: "Debug grub-mkconfig BIOS mode"
+ ansible.builtin.debug:
+    msg:
+      - "{{ __sap_hypervisor_node_preconfigure_register_grub2_mkconfig_bios_mode.stdout_lines }}"
+      - "{{ __sap_hypervisor_node_preconfigure_register_grub2_mkconfig_bios_mode.stderr_lines }}"
+ listen: __sap_hypervisor_node_preconfigure_regenerate_grub2_conf_handler
+ when:
+ - not __sap_hypervisor_node_preconfigure_register_stat_sys_firmware_efi.stat.exists
+ - sap_hypervisor_node_preconfigure_run_grub2_mkconfig|d(true)
+
+- name: "Set the grub.cfg location RHEL"
+ ansible.builtin.set_fact:
+ __sap_hypervisor_node_preconfigure_uefi_boot_dir: /boot/efi/EFI/redhat/grub.cfg
+ listen: __sap_hypervisor_node_preconfigure_regenerate_grub2_conf_handler
+ when:
+ - ansible_distribution == 'RedHat'
+
+- name: "Set the grub.cfg location SLES"
+ ansible.builtin.set_fact:
+ __sap_hypervisor_node_preconfigure_uefi_boot_dir: /boot/efi/EFI/BOOT/grub.cfg
+ listen: __sap_hypervisor_node_preconfigure_regenerate_grub2_conf_handler
+ when:
+ - ansible_distribution == 'SLES' or ansible_distribution == 'SLES_SAP'
+
+- name: "Run grub-mkconfig (UEFI mode)"
+ ansible.builtin.command: "grub2-mkconfig -o {{ __sap_hypervisor_node_preconfigure_uefi_boot_dir }}"
+ register: __sap_hypervisor_node_preconfigure_register_grub2_mkconfig_uefi_mode
+ listen: __sap_hypervisor_node_preconfigure_regenerate_grub2_conf_handler
+ notify: __sap_hypervisor_node_preconfigure_reboot_handler
+ when:
+ - __sap_hypervisor_node_preconfigure_register_stat_sys_firmware_efi.stat.exists
+ - sap_hypervisor_node_preconfigure_run_grub2_mkconfig|d(true)
+ become: true
+ become_user: root
+
+- name: "Debug grub-mkconfig UEFI"
+ ansible.builtin.debug:
+    msg:
+      - "{{ __sap_hypervisor_node_preconfigure_register_grub2_mkconfig_uefi_mode.stdout_lines }}"
+      - "{{ __sap_hypervisor_node_preconfigure_register_grub2_mkconfig_uefi_mode.stderr_lines }}"
+ listen: __sap_hypervisor_node_preconfigure_regenerate_grub2_conf_handler
+ when:
+ - __sap_hypervisor_node_preconfigure_register_stat_sys_firmware_efi.stat.exists
+ - sap_hypervisor_node_preconfigure_run_grub2_mkconfig|d(true)
+
+- name: Reboot the managed node
+ ansible.builtin.reboot:
+ test_command: /bin/true
+ listen: __sap_hypervisor_node_preconfigure_reboot_handler
+ when:
+ - sap_hypervisor_node_preconfigure_reboot_ok|d(false)
+
+- name: Let the role fail if a reboot is required
+ ansible.builtin.fail:
+ msg: Reboot is required!
+ listen: __sap_hypervisor_node_preconfigure_reboot_handler
+ when:
+ - sap_hypervisor_node_preconfigure_fail_if_reboot_required|d(true)
+ - not sap_hypervisor_node_preconfigure_reboot_ok|d(false)
+
+- name: Show a warning message if a reboot is required
+ ansible.builtin.debug:
+ msg: "WARN: Reboot is required!"
+ listen: __sap_hypervisor_node_preconfigure_reboot_handler
+ when:
+ - not sap_hypervisor_node_preconfigure_fail_if_reboot_required|d(true)
+ - not sap_hypervisor_node_preconfigure_reboot_ok|d(false)
diff --git a/roles/sap_hypervisor_node_preconfigure/handlers/platform/vmware_vsphere/.gitkeep b/roles/sap_hypervisor_node_preconfigure/handlers/platform/vmware_vsphere/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_hypervisor_node_preconfigure/meta/main.yml b/roles/sap_hypervisor_node_preconfigure/meta/main.yml
new file mode 100644
index 0000000..ba8a2c6
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/meta/main.yml
@@ -0,0 +1,14 @@
+---
+galaxy_info:
+ namespace: community
+ role_name: sap_hypervisor_node_preconfigure
+ author: Nils Koenig
+ description: Provide the configuration of hypervisors for SAP workloads
+ license: Apache-2.0
+ min_ansible_version: "2.9"
+ galaxy_tags: ['sap', 'hana', 'rhel', 'redhat', 'openshift']
+ platforms:
+ - name: RHEL
+ versions:
+ 8
+dependencies: []
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/main.yml b/roles/sap_hypervisor_node_preconfigure/tasks/main.yml
new file mode 100644
index 0000000..1736339
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/main.yml
@@ -0,0 +1,6 @@
+---
+- name: SAP certified hypervisor node preconfigure - Include Vars for {{ sap_hypervisor_node_platform }}
+ ansible.builtin.include_vars: "platform_defaults_{{ sap_hypervisor_node_platform }}.yml"
+
+- name: SAP certified hypervisor node preconfigure - Include Tasks for {{ sap_hypervisor_node_platform }}
+ ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_platform }}/main.yml"
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/ibmpower_phyp/.gitkeep b/roles/sap_hypervisor_node_preconfigure/tasks/platform/ibmpower_phyp/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/99-kargs-worker.yml.j2 b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/99-kargs-worker.yml.j2
new file mode 100644
index 0000000..32064a8
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/99-kargs-worker.yml.j2
@@ -0,0 +1,17 @@
+apiVersion: machineconfiguration.openshift.io/v1
+kind: MachineConfig
+metadata:
+ labels:
+ machineconfiguration.openshift.io/role: worker
+ name: 99-kargs-worker
+spec:
+ config:
+ ignition:
+ version: 3.2.0
+ kernelArguments:
+ - intel_iommu=on
+ - iommu=pt
+ - default_hugepagesz=1GB
+ - hugepagesz=1GB
+ - hugepages={{ __sap_hypervisor_node_preconfigure_register_worker_reserved_hugepages }}
+ - tsx=on
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/configure-worker-node.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/configure-worker-node.yml
new file mode 100644
index 0000000..3af1dcf
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/configure-worker-node.yml
@@ -0,0 +1,19 @@
+---
+- name: Label nodes
+ ansible.builtin.command: "oc label node {{ __sap_hypervisor_node_preconfigure_register_worker.name }} cpumanager=true --overwrite=true"
+ register: __sap_hypervisor_node_preconfigure_label_node_result
+ changed_when: __sap_hypervisor_node_preconfigure_label_node_result.rc != 0
+
+- name: Include node network
+ ansible.builtin.include_tasks: node-network.yml
+ with_items: "{{ __sap_hypervisor_node_preconfigure_register_worker.networks }}"
+ loop_control:
+ loop_var: __sap_hypervisor_node_preconfigure_register_worker_network
+ index_var: __sap_hypervisor_node_preconfigure_register_worker_network_nr
+ when: __sap_hypervisor_node_preconfigure_register_worker.networks is defined
+
+# How to wait for the node to be schedulable? (NodeSchedulable)
+- name: Wait for all k8s nodes to be ready
+ ansible.builtin.command: oc wait --for=condition=Ready nodes --all --timeout=3600s
+ register: __sap_hypervisor_node_preconfigure_register_nodes_ready
+ changed_when: __sap_hypervisor_node_preconfigure_register_nodes_ready.rc != 0
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/create-sap-bridge.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/create-sap-bridge.yml
new file mode 100644
index 0000000..bbdbdff
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/create-sap-bridge.yml
@@ -0,0 +1,49 @@
+---
+- name: Create SAP bridge NodeNetworkConfigurationPolicy
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: nmstate.io/v1
+ kind: NodeNetworkConfigurationPolicy
+ metadata:
+ name: "sap-bridge-policy-{{ worker.name }}"
+ spec:
+ nodeSelector:
+ kubernetes.io/hostname: "{{ worker.name }}"
+ desiredState:
+ interfaces:
+ - name: sapbridge
+ description: "Linux bridge with {{ worker.sap_bridge_interface }} as physical port to access SAP network"
+ type: linux-bridge
+ state: up
+ ipv4:
+ enabled: false
+ bridge:
+ options:
+ stp:
+ enabled: false
+ port:
+ - name: "{{ worker.sap_bridge_interface }}"
+
+
+- name: Create SAP bridge NetworkAttachmentDefinition
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: "k8s.cni.cncf.io/v1"
+ kind: NetworkAttachmentDefinition
+ metadata:
+      labels:
+        kubernetes.io/hostname: "{{ worker.name }}"
+        machineconfiguration.openshift.io/role: "{{ worker.name }}"
+ namespace: "{{ vm_namespace }}"
+ name: sap-bridge-network-definition
+ annotations:
+ k8s.v1.cni.cncf.io/resourceName: bridge.network.kubevirt.io/sapbridge
+ spec:
+ config: '{
+ "cniVersion": "0.3.1",
+ "name": "sap-bridge-network-definition",
+ "type": "cnv-bridge",
+ "bridge": "sapbridge",
+ "macspoofchk": true
+ }'
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-cnv-operator.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-cnv-operator.yml
new file mode 100644
index 0000000..e36e26f
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-cnv-operator.yml
@@ -0,0 +1,74 @@
+---
+- name: Create the CNV Operator namespace
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: openshift-cnv
+
+- name: Create CNV OperatorGroup kubevirt-hyperconverged-group
+ kubernetes.core.k8s:
+ state: present
+
+ definition:
+ apiVersion: operators.coreos.com/v1
+ kind: OperatorGroup
+ metadata:
+ name: kubevirt-hyperconverged-group
+ namespace: openshift-cnv
+ spec:
+ targetNamespaces:
+ - openshift-cnv
+
+- name: Create CNV Subscription
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: operators.coreos.com/v1alpha1
+ kind: Subscription
+ metadata:
+ name: hco-operatorhub
+ namespace: openshift-cnv
+ spec:
+ source: redhat-operators
+ sourceNamespace: openshift-marketplace
+ name: kubevirt-hyperconverged
+
+- name: Wait for the CNV Subscription to generate an Install Plan
+ ansible.builtin.pause:
+ seconds: 300
+
+- name: Get Install Plan Name
+ retries: 10
+ delay: 10
+ ansible.builtin.command: oc get subscriptions/hco-operatorhub --namespace openshift-cnv --output=jsonpath='{$.status.installplan.name}'
+ register: __sap_hypervisor_node_preconfigure_register_cnv_subscription_install_plan_name
+ until: __sap_hypervisor_node_preconfigure_register_cnv_subscription_install_plan_name.stdout != ""
+ changed_when: __sap_hypervisor_node_preconfigure_register_cnv_subscription_install_plan_name.stdout != ""
+
+- name: Wait for Install Plan to finish
+ ansible.builtin.command: "oc wait installplan \
+ {{ __sap_hypervisor_node_preconfigure_register_cnv_subscription_install_plan_name.stdout }} --namespace openshift-cnv --for=condition='Installed' --timeout='5m'"
+ register: __sap_hypervisor_node_preconfigure_register_wait_for_installplan
+ changed_when: __sap_hypervisor_node_preconfigure_register_wait_for_installplan.rc != 0
+
+- name: Wait for the CNV Operator installation to settle
+ ansible.builtin.pause:
+ seconds: 300
+
+- name: Create CNV HyperConverged
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: hco.kubevirt.io/v1beta1
+ kind: HyperConverged
+ metadata:
+ name: kubevirt-hyperconverged
+ namespace: openshift-cnv
+ spec:
+
+- name: Wait for the CNV HyperConverged deployment to settle
+ ansible.builtin.pause:
+ seconds: 300
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-hpp.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-hpp.yml
new file mode 100644
index 0000000..daa713a
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-hpp.yml
@@ -0,0 +1,89 @@
+---
+- name: Create systemd files for local storage handling
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: machineconfiguration.openshift.io/v1
+ kind: MachineConfig
+ metadata:
+ annotations:
+ labels:
+ machineconfiguration.openshift.io/role: worker
+ name: 50-hpp-local
+ spec:
+ config:
+ ignition:
+ version: 2.2.0
+ systemd:
+ units:
+ - contents: |
+ [Unit]
+ Description=Create mountpoint /var/localstorage and initialize filesystem
+ Before=var-localstorage.mount
+ [Service]
+ Type=oneshot
+ ExecStart=/bin/bash -c "if [[ $(lsblk -o FSTYPE {{ sap_hypervisor_node_preconfigure_cluster_config.worker_localstorage_device }} --noheadings) != 'xfs' ]]; then mkfs.xfs -f {{ sap_hypervisor_node_preconfigure_cluster_config.worker_localstorage_device }}; fi"
+ ExecStart=/bin/mkdir -p /var/localstorage
+ enabled: true
+ name: create-mountpoint-var-localstorage.service
+ - contents: |
+ [Unit]
+ After=create-mountpoint-var-localstorage.service
+ Requires=create-mountpoint-var-localstorage.service
+ [Mount]
+ What={{ sap_hypervisor_node_preconfigure_cluster_config.worker_localstorage_device }}
+ Where=/var/localstorage
+ Type=xfs
+ [Install]
+ WantedBy=local-fs.target
+ enabled: true
+ name: var-localstorage.mount
+ - contents: |
+ [Unit]
+ Description=Set SELinux chcon for hostpath provisioner
+ Before=kubelet.service
+ After=var-localstorage.mount
+ [Service]
+ ExecStart=/usr/bin/chcon -Rt container_file_t /var/localstorage
+ [Install]
+ WantedBy=multi-user.target
+ enabled: true
+ name: hostpath-provisioner.service
+
+- name: Wait for mountpoint to be ready
+ ansible.builtin.pause:
+ minutes: 3
+
+- name: Create hostpath provisioner (HPP)
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: hostpathprovisioner.kubevirt.io/v1beta1
+ kind: HostPathProvisioner
+ metadata:
+ name: hostpath-provisioner
+ spec:
+ imagePullPolicy: IfNotPresent
+ storagePools:
+ - name: localstorage
+ path: /var/localstorage
+ workload:
+ nodeSelector:
+ kubernetes.io/os: linux
+ machineconfiguration.openshift.io/role: worker
+
+- name: Create storage class for HPP
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: storage.k8s.io/v1
+ kind: StorageClass
+ metadata:
+ name: local
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+ provisioner: kubevirt.io.hostpath-provisioner
+ reclaimPolicy: Delete
+ volumeBindingMode: WaitForFirstConsumer
+ parameters:
+ storagePool: localstorage
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-nmstate-operator.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-nmstate-operator.yml
new file mode 100644
index 0000000..5e1e4f4
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-nmstate-operator.yml
@@ -0,0 +1,70 @@
+---
+- name: Create the nmstate operator namespace
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ labels:
+ kubernetes.io/metadata.name: openshift-nmstate
+ name: openshift-nmstate
+ name: openshift-nmstate
+ spec:
+ finalizers:
+ - kubernetes
+
+- name: Create the OperatorGroup
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: operators.coreos.com/v1
+ kind: OperatorGroup
+ metadata:
+ annotations:
+ olm.providedAPIs: NMState.v1.nmstate.io
+ generateName: openshift-nmstate-
+ name: openshift-nmstate-tn6k8
+ namespace: openshift-nmstate
+ spec:
+ targetNamespaces:
+ - openshift-nmstate
+
+- name: Pause to give operator a chance to install
+ ansible.builtin.pause:
+ minutes: 2
+
+- name: Subscribe to the nmstate Operator
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: operators.coreos.com/v1alpha1
+ kind: Subscription
+ metadata:
+ labels:
+ operators.coreos.com/kubernetes-nmstate-operator.openshift-nmstate: ""
+ name: kubernetes-nmstate-operator
+ namespace: openshift-nmstate
+ spec:
+ channel: stable
+ installPlanApproval: Automatic
+ name: kubernetes-nmstate-operator
+ source: redhat-operators
+ sourceNamespace: openshift-marketplace
+
+- name: Pause to give operator a chance to install
+ ansible.builtin.pause:
+ minutes: 5
+
+- name: Create instance of the nmstate operator
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: nmstate.io/v1
+ kind: NMState
+ metadata:
+ name: nmstate
+
+- name: Pause to give instance a chance to come up
+ ansible.builtin.pause:
+ minutes: 5
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-sriov-operator.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-sriov-operator.yml
new file mode 100644
index 0000000..5fcb437
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-sriov-operator.yml
@@ -0,0 +1,54 @@
+---
+- name: Create the SRIOV Operator namespace
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: openshift-sriov-network-operator
+
+- name: Create the SRIOV OperatorGroup
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: operators.coreos.com/v1
+ kind: OperatorGroup
+ metadata:
+ name: sriov-network-operators
+ namespace: openshift-sriov-network-operator
+ spec:
+ targetNamespaces:
+ - openshift-sriov-network-operator
+
+- name: Create the SRIOV Operator Subscription
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: operators.coreos.com/v1alpha1
+ kind: Subscription
+ metadata:
+ name: sriov-network-operator-subscription
+ namespace: openshift-sriov-network-operator
+ spec:
+ source: redhat-operators
+ sourceNamespace: openshift-marketplace
+ name: sriov-network-operator
+ channel: "stable"
+
+- name: Pause to give operator a chance to install
+ ansible.builtin.pause:
+ minutes: 3
+
+- name: Copy patch to enable unsupported NICs
+ ansible.builtin.copy:
+ src: sriov-enabled-unsupported-nics.sh
+ dest: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}/sriov-enabled-unsupported-nics.sh"
+ mode: "0755"
+ when: sap_hypervisor_node_preconfigure_sriov_enable_unsupported_nics
+
+- name: Enable unsupported NICs
+ ansible.builtin.command: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}/sriov-enabled-unsupported-nics.sh"
+ when: sap_hypervisor_node_preconfigure_sriov_enable_unsupported_nics
+ register: __sap_hypervisor_node_preconfigure_register_enable_unsupported_nics
+ changed_when: __sap_hypervisor_node_preconfigure_register_enable_unsupported_nics.rc != 0
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-trident.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-trident.yml
new file mode 100644
index 0000000..c1788db
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-trident.yml
@@ -0,0 +1,48 @@
+---
+- name: Download trident
+ ansible.builtin.unarchive:
+ remote_src: true
+ src: "{{ sap_hypervisor_node_preconfigure_install_trident_url }}"
+ dest: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}/"
+
+- name: Uninstall trident
+ ansible.builtin.command: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}/trident-installer/tridentctl uninstall -n trident"
+ ignore_errors: true
+ register: __sap_hypervisor_node_preconfigure_register_uninstall_trident
+ changed_when: __sap_hypervisor_node_preconfigure_register_uninstall_trident.rc != 0
+
+- name: Install trident
+ ansible.builtin.command: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}/trident-installer/tridentctl install -n trident"
+ register: __sap_hypervisor_node_preconfigure_register_install_trident
+ changed_when: __sap_hypervisor_node_preconfigure_register_install_trident.rc != 0
+
+- name: Copy backend file
+ ansible.builtin.template:
+ src: "trident-backend.json.j2"
+ dest: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}/trident-backend.json"
+ mode: "0644"
+
+- name: Create trident backend
+ ansible.builtin.command: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}\
+ /trident-installer/tridentctl -n trident create backend -f\
+ {{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}\
+ /trident-backend.json"
+ register: __sap_hypervisor_node_preconfigure_register_create_trident_backend
+ changed_when: __sap_hypervisor_node_preconfigure_register_create_trident_backend.rc != 0
+
+- name: Create storage class
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: storage.k8s.io/v1
+ kind: StorageClass
+ metadata:
+ name: nas
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "true"
+ provisioner: csi.trident.netapp.io
+ parameters:
+ backendType: "{{ sap_hypervisor_node_preconfigure_cluster_config.trident.storage_driver }}"
+ snapshots: "true"
+ provisioningType: "thin"
+ encryption: "false"
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-virtctl.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-virtctl.yml
new file mode 100644
index 0000000..bd5dd81
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/install-virtctl.yml
@@ -0,0 +1,15 @@
+---
+- name: Create ~/bin
+ ansible.builtin.file:
+ path: ~/bin
+ state: directory
+ mode: "0700"
+
+- name: Get and extract virtctl
+ ansible.builtin.unarchive:
+ validate_certs: false
+ remote_src: true
+ src: "https://hyperconverged-cluster-cli-download-openshift-cnv.apps.\
+ {{ sap_hypervisor_node_preconfigure_cluster_config.cluster_url }}/amd64/linux/virtctl.tar.gz"
+ dest: ~/bin
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/kargs.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/kargs.yml
new file mode 100644
index 0000000..bd28ea5
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/kargs.yml
@@ -0,0 +1,11 @@
+---
+- name: Personalize template
+ ansible.builtin.template:
+ src: 99-kargs-worker.yml.j2
+ dest: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}/99-kargs-{{ __sap_hypervisor_node_preconfigure_register_worker_name }}.yml.j2"
+ mode: "0644"
+
+- name: Enable hugepages
+ kubernetes.core.k8s:
+ state: present
+ src: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}/99-kargs-{{ __sap_hypervisor_node_preconfigure_register_worker_name }}.yml.j2"
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/label-worker-invtsc.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/label-worker-invtsc.yml
new file mode 100644
index 0000000..57a52da
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/label-worker-invtsc.yml
@@ -0,0 +1,11 @@
+---
+- name: Label worker with invtsc flag
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: default
+ labels:
+ 'feature.node.kubernetes.io/cpu-feature-invtsc': enabled
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/main.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/main.yml
new file mode 100644
index 0000000..a3dec29
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/main.yml
@@ -0,0 +1,104 @@
+---
+- name: Get a list of all nodes from any namespace
+ kubernetes.core.k8s_info:
+ kind: Node
+ register: __sap_hypervisor_node_preconfigure_register_node_list
+
+- name: Generate list with worker node names
+ ansible.builtin.set_fact:
+ __sap_hypervisor_node_preconfigure_register_worker_node_name_list:
+ "{{ __sap_hypervisor_node_preconfigure_register_worker_node_name_list | \
+ d([]) + [__sap_hypervisor_node_preconfigure_register_worker_node.name] }}"
+ with_items: "{{ sap_hypervisor_node_preconfigure_cluster_config.workers }}"
+ loop_control:
+ loop_var: __sap_hypervisor_node_preconfigure_register_worker_node
+
+- name: Filter hosts
+ ansible.builtin.set_fact:
+ __sap_hypervisor_node_preconfigure_register_nodes:
+ "{{ __sap_hypervisor_node_preconfigure_register_nodes | \
+ d([]) + [__sap_hypervisor_node_preconfigure_register_host] }}"
+ with_items: "{{ __sap_hypervisor_node_preconfigure_register_node_list['resources'] }}"
+ loop_control:
+ loop_var: __sap_hypervisor_node_preconfigure_register_host
+ when: __sap_hypervisor_node_preconfigure_register_host.metadata.name in __sap_hypervisor_node_preconfigure_register_worker_node_name_list
+
+- name: Assert that configured nodes are found
+ ansible.builtin.assert:
+ that: __sap_hypervisor_node_preconfigure_register_nodes is defined
+ fail_msg: No nodes found that match configuration provided in sap_hypervisor_node_preconfigure_cluster_config
+ success_msg: Configured nodes found
+
+# Determine available memory on first worker node.
+# This amount will be used for all nodes, so make sure all have an identical amount.
+- name: Get worker name
+ ansible.builtin.set_fact:
+ __sap_hypervisor_node_preconfigure_register_worker_name:
+ "{{ __sap_hypervisor_node_preconfigure_register_nodes[0]['metadata']['labels']['kubernetes.io/hostname'] }}"
+
+- name: Get memory of first worker node (will be used for all worker nodes later on)
+ ansible.builtin.set_fact:
+ __sap_hypervisor_node_preconfigure_register_worker_memory_gib:
+ "{{ (__sap_hypervisor_node_preconfigure_register_nodes[0]['status']['capacity']['memory'] | replace('Ki', '') | int / 1048576) }}"
+
+- name: Check if host has minimal amount of memory (96GiB)
+ ansible.builtin.assert:
+ that: __sap_hypervisor_node_preconfigure_register_worker_memory_gib | int >= 96
+ fail_msg: "Not enough memory on node {{ __sap_hypervisor_node_preconfigure_register_worker_name }}"
+ success_msg: "Enough memory on node {{ __sap_hypervisor_node_preconfigure_register_worker_name }}"
+ ignore_errors: "{{ sap_hypervisor_node_preconfigure_ignore_minimal_memory_check }}"
+
+# Calculate the amount of memory to be allocated as hugepages:
+# reserve 32 GiB for the hypervisor on hosts with < 512 GiB of memory, 64 GiB otherwise.
+- name: Calculate amount of hugepages to reserve (host memory < 512 GiB)
+ ansible.builtin.set_fact:
+ __sap_hypervisor_node_preconfigure_register_worker_reserved_hugepages: "{{ __sap_hypervisor_node_preconfigure_register_worker_memory_gib | int - sap_hypervisor_node_preconfigure_hypervisor_reserved_ram_host_lt_512 }}"
+ when: __sap_hypervisor_node_preconfigure_register_worker_memory_gib | int < 512
+
+- name: Calculate amount of hugepages to reserve (host memory >= 512 GiB)
+ ansible.builtin.set_fact:
+ __sap_hypervisor_node_preconfigure_register_worker_reserved_hugepages: "{{ __sap_hypervisor_node_preconfigure_register_worker_memory_gib | int - sap_hypervisor_node_preconfigure_hypervisor_reserved_ram_host_ge_512 }}"
+ when: __sap_hypervisor_node_preconfigure_register_worker_memory_gib | int >= 512
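+# Worked example (illustrative, with the default reservations of 32/64 GiB):
+# a 256 GiB worker yields 256 - 32 = 224 GiB for hugepages,
+# a 1024 GiB worker yields 1024 - 64 = 960 GiB.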
+
+- name: Include prepare
+ ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_platform }}/prepare.yml"
+
+- name: Include tuned virtual host
+ ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_platform }}/tuned-virtual-host.yml"
+
+- name: Include install CNV operator
+ ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_platform }}/install-cnv-operator.yml"
+ when: sap_hypervisor_node_preconfigure_install_operators
+
+- name: Include install sriov operator
+ ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_platform }}/install-sriov-operator.yml"
+ when: sap_hypervisor_node_preconfigure_install_operators
+
+- name: Include install nmstate operator
+ ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_platform }}/install-nmstate-operator.yml"
+ when: sap_hypervisor_node_preconfigure_install_operators
+
+- name: Include install virtctl
+ ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_platform }}/install-virtctl.yml"
+
+- name: Include setup worker nodes
+ ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_platform }}/setup-worker-nodes.yml"
+ when: sap_hypervisor_node_preconfigure_setup_workers
+
+# How to wait for nodes to be schedulable? (NodeSchedulable)
+- name: Wait for all k8s nodes to be ready
+ ansible.builtin.command: oc wait --for=condition=Ready nodes --all --timeout=3600s
+ register: __sap_hypervisor_node_preconfigure_register_nodes_ready
+ changed_when: __sap_hypervisor_node_preconfigure_register_nodes_ready.rc != 0
+
+- name: Print nodes
+ ansible.builtin.debug:
+ var: __sap_hypervisor_node_preconfigure_register_nodes_ready.stdout_lines
+
+- name: Include Trident installation
+ ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_platform }}/install-trident.yml"
+ when: sap_hypervisor_node_preconfigure_install_trident
+
+- name: Include local storage creation (HPP)
+ ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_platform }}/install-hpp.yml"
+ when: sap_hypervisor_node_preconfigure_install_hpp
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/node-network.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/node-network.yml
new file mode 100644
index 0000000..421d24c
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/node-network.yml
@@ -0,0 +1,103 @@
+---
+- name: Print network
+ ansible.builtin.debug:
+ var: __sap_hypervisor_node_preconfigure_register_worker_network
+
+- name: "Create NodeNetworkConfigurationPolicy\
+ {{ __sap_hypervisor_node_preconfigure_register_worker_network.name }} on \
+ {{ __sap_hypervisor_node_preconfigure_register_worker.name }}"
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: nmstate.io/v1
+ kind: NodeNetworkConfigurationPolicy
+ metadata:
+ name: "{{ __sap_hypervisor_node_preconfigure_register_worker_network.name }}-{{ __sap_hypervisor_node_preconfigure_register_worker.name }}"
+ spec:
+ nodeSelector:
+ kubernetes.io/hostname: "{{ __sap_hypervisor_node_preconfigure_register_worker.name }}"
+ desiredState:
+ interfaces:
+ - "{{ __sap_hypervisor_node_preconfigure_register_worker_network }}"
+ when: __sap_hypervisor_node_preconfigure_register_worker_network.type == 'linux-bridge'
+
+- name: "Create NetworkAttachmentDefinition {{ __sap_hypervisor_node_preconfigure_register_worker_network.name }}"
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: "k8s.cni.cncf.io/v1"
+ kind: NetworkAttachmentDefinition
+ metadata:
+ namespace: "{{ sap_hypervisor_node_preconfigure_cluster_config.vm_namespace }}"
+ name: "{{ __sap_hypervisor_node_preconfigure_register_worker_network.name }}-network-definition"
+ annotations:
+ k8s.v1.cni.cncf.io/resourceName: "bridge.network.kubevirt.io/{{ __sap_hypervisor_node_preconfigure_register_worker_network.name }}"
+ spec:
+ config: '{
+ "cniVersion": "0.3.1",
+ "name": "sapbridge-network-definition",
+ "type": "cnv-bridge",
+ "bridge": "sapbridge",
+ "macspoofchk": true
+ }'
+ when: __sap_hypervisor_node_preconfigure_register_worker_network.type == 'linux-bridge'
+
+- name: Label the node with feature.node.kubernetes.io/network-sriov.capable=true
+ kubernetes.core.k8s:
+ definition:
+ apiVersion: v1
+ kind: Node
+ metadata:
+ name: "{{ __sap_hypervisor_node_preconfigure_register_worker.name }}"
+ labels:
+ feature.node.kubernetes.io/network-sriov.capable: "true"
+ state: present
+ when: __sap_hypervisor_node_preconfigure_register_worker_network.type == 'sriov'
+
+- name: "Create SRIOV NodeNetworkConfigurationPolicy \
+ {{ __sap_hypervisor_node_preconfigure_register_worker_network.name }} on \
+ {{ __sap_hypervisor_node_preconfigure_register_worker.name }}"
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: sriovnetwork.openshift.io/v1
+ kind: SriovNetworkNodePolicy
+ metadata:
+ name: "iface-{{ __sap_hypervisor_node_preconfigure_register_worker_network.name }}-sriov-{{ __sap_hypervisor_node_preconfigure_register_worker.name }}"
+ namespace: openshift-sriov-network-operator
+ spec:
+ resourceName: "iface{{ __sap_hypervisor_node_preconfigure_register_worker_network.name }}sriov"
+ nodeSelector:
+ feature.node.kubernetes.io/network-sriov.capable: "true"
+ kubernetes.io/hostname: "{{ __sap_hypervisor_node_preconfigure_register_worker.name }}"
+ priority: 5
+ mtu: 9000
+ numVfs: 8
+ nicSelector:
+ pfNames: ['{{ __sap_hypervisor_node_preconfigure_register_worker_network.interface }}#0-7']
+ deviceType: vfio-pci
+ isRdma: false
+ when: __sap_hypervisor_node_preconfigure_register_worker_network.type == "sriov"
+
+- name: "Create SriovNetwork Attachment Definition {{ __sap_hypervisor_node_preconfigure_register_worker_network.name }}"
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: sriovnetwork.openshift.io/v1
+ kind: SriovNetwork
+ metadata:
+ name: "iface-{{ __sap_hypervisor_node_preconfigure_register_worker_network.name }}-sriov"
+ namespace: openshift-sriov-network-operator
+ spec:
+ ipam: |
+ {
+ "type": "host-local",
+ "subnet": "192.168.1.0/24",
+ "rangeStart": "192.168.1.200",
+ "rangeEnd": "192.168.1.210"
+ }
+ networkNamespace: openshift-sriov-network-operator
+ resourceName: "iface{{ __sap_hypervisor_node_preconfigure_register_worker_network.name }}sriov"
+ spoofChk: "off"
+ trust: "on"
+ when: __sap_hypervisor_node_preconfigure_register_worker_network.type == "sriov"
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/prepare.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/prepare.yml
new file mode 100644
index 0000000..0dfbfa1
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/prepare.yml
@@ -0,0 +1,16 @@
+---
+- name: Gather Facts
+ ansible.builtin.gather_facts:
+
+- name: Create Tempdir
+ ansible.builtin.tempfile:
+ state: directory
+ suffix: "_sap_hypervisor_node_preconfigure"
+ register: __sap_hypervisor_node_preconfigure_register_tmpdir
+
+- name: "Create VM namespace {{ sap_hypervisor_node_preconfigure_cluster_config.vm_namespace }}"
+ kubernetes.core.k8s:
+ name: "{{ sap_hypervisor_node_preconfigure_cluster_config.vm_namespace }}"
+ api_version: v1
+ kind: Namespace
+ state: present
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/setup-worker-nodes.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/setup-worker-nodes.yml
new file mode 100644
index 0000000..29420be
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/setup-worker-nodes.yml
@@ -0,0 +1,81 @@
+---
+- name: Include configure worker
+ ansible.builtin.include_tasks: "platform/{{ sap_hypervisor_node_platform }}/configure-worker-node.yml"
+ with_items: "{{ sap_hypervisor_node_preconfigure_cluster_config.workers }}"
+ loop_control:
+ loop_var: __sap_hypervisor_node_preconfigure_register_worker
+ index_var: __sap_hypervisor_node_preconfigure_register_worker_nr
+
+- name: Enable CPU Manager by patching MCP worker
+ kubernetes.core.k8s:
+ state: patched
+ definition:
+ apiVersion: machineconfiguration.openshift.io/v1
+ kind: MachineConfigPool
+ metadata:
+ name: worker
+ labels:
+ custom-kubelet: cpumanager-enabled
+
+- name: Delete KubeletConfig for cpumanager
+ kubernetes.core.k8s:
+ state: absent
+ definition:
+ apiVersion: machineconfiguration.openshift.io/v1
+ kind: KubeletConfig
+ metadata:
+ name: cpumanager-enabled
+ spec:
+ machineConfigPoolSelector:
+ matchLabels:
+ custom-kubelet: cpumanager-enabled
+ kubeletConfig:
+ cpuManagerPolicy: static
+ cpuManagerReconcilePeriod: 5s
+
+- name: Create KubeletConfig for cpumanager worker with CPUs reserved for Kubernetes
+ when: sap_hypervisor_node_preconfigure_cluster_config.worker_kubernetes_reserved_cpus is defined
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: machineconfiguration.openshift.io/v1
+ kind: KubeletConfig
+ metadata:
+ name: cpumanager-enabled
+ spec:
+ machineConfigPoolSelector:
+ matchLabels:
+ custom-kubelet: cpumanager-enabled
+ kubeletConfig:
+ cpuManagerPolicy: static
+ cpuManagerReconcilePeriod: 5s
+ reservedSystemCPUs: "{{ sap_hypervisor_node_preconfigure_cluster_config.worker_kubernetes_reserved_cpus }}"
+
+- name: Create KubeletConfig for cpumanager worker
+ when: sap_hypervisor_node_preconfigure_cluster_config.worker_kubernetes_reserved_cpus is not defined
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: machineconfiguration.openshift.io/v1
+ kind: KubeletConfig
+ metadata:
+ name: cpumanager-enabled
+        labels:
+          machineconfiguration.openshift.io/role: worker
+ spec:
+ machineConfigPoolSelector:
+ matchLabels:
+ custom-kubelet: cpumanager-enabled
+ kubeletConfig:
+ cpuManagerPolicy: static
+ cpuManagerReconcilePeriod: 5s
+
+- name: Render template
+ ansible.builtin.template:
+ src: 99-kargs-worker.yml.j2
+ dest: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}/99-kargs-worker.yml"
+ mode: "0644"
+
+- name: Enable hugepages
+ kubernetes.core.k8s:
+ state: present
+ src: "{{ __sap_hypervisor_node_preconfigure_register_tmpdir.path }}/99-kargs-worker.yml"
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/sriov-enabled-unsupported-nics.sh b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/sriov-enabled-unsupported-nics.sh
new file mode 100644
index 0000000..6cec1a6
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/sriov-enabled-unsupported-nics.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+# Disable the operator webhook in order to allow unsupported SR-IOV NICs such as Mellanox
+oc patch sriovoperatorconfig default --type=merge -n openshift-sriov-network-operator --patch '{ "spec": { "enableOperatorWebhook": false } }'
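+# To restore the default behavior later, the inverse patch should work (sketch, not executed by this role):
+# oc patch sriovoperatorconfig default --type=merge -n openshift-sriov-network-operator --patch '{ "spec": { "enableOperatorWebhook": true } }'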
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/trident-backend.json.j2 b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/trident-backend.json.j2
new file mode 100644
index 0000000..e422aab
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/trident-backend.json.j2
@@ -0,0 +1,18 @@
+{
+ "nfsMountOptions": "nfsvers=3",
+ "defaults": {
+ "exportPolicy": "default"
+ },
+ "debug":false,
+ "managementLIF":"{{ sap_hypervisor_node_preconfigure_cluster_config.trident.management }}",
+ "dataLIF":"{{ sap_hypervisor_node_preconfigure_cluster_config.trident.data }}",
+ "svm":"{{ sap_hypervisor_node_preconfigure_cluster_config.trident.svm }}",
+ "backendName": "{{ sap_hypervisor_node_preconfigure_cluster_config.trident.backend }}",
+ "aggregate":"{{ sap_hypervisor_node_preconfigure_cluster_config.trident.aggregate }}",
+ "username":"{{ sap_hypervisor_node_preconfigure_cluster_config.trident.username }}",
+ "password":"{{ sap_hypervisor_node_preconfigure_cluster_config.trident.password }}",
+ "storageDriverName":"{{ sap_hypervisor_node_preconfigure_cluster_config.trident.storage_driver }}",
+ "storagePrefix":"{{ sap_hypervisor_node_preconfigure_cluster_config.trident.storage_prefix }}",
+ "version":1
+}
+
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/tuned-virtual-host.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/tuned-virtual-host.yml
new file mode 100644
index 0000000..e2dd4f4
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_ocp_virt/tuned-virtual-host.yml
@@ -0,0 +1,21 @@
+---
+- name: Set virtual-host for worker nodes
+ kubernetes.core.k8s:
+ state: present
+ definition:
+ apiVersion: tuned.openshift.io/v1
+ kind: Tuned
+ metadata:
+ name: virtual-host
+ namespace: openshift-cluster-node-tuning-operator
+ spec:
+ profile:
+ - data: |
+ [main]
+ include=virtual-host
+ name: virtual-host
+ recommend:
+ - match:
+ - label: "node-role.kubernetes.io/worker"
+ priority: 10
+ profile: virtual-host
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/50_hana b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/50_hana
new file mode 100644
index 0000000..ec1883b
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/50_hana
@@ -0,0 +1,65 @@
+#!/usr/bin/python3
+
+import os
+import sys
+import traceback
+
+import hooking
+
+'''
+Syntax:
+hana=1 (value doesn't matter)
+
+The VM must be configured as High Performance with 1GB hugepages.
+For that the following kernel boot line is required for the hypervisor:
+
+"default_hugepagesz=1GB hugepagesz=1GB hugepages=[# hugepages needed]"
+
+In addition the "hugepages" custom property needs to be set to 1048576.
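+
+The custom property can be registered on the RHV engine with a command of this
+form (assumed by analogy with the 50_iothread_pinning hook; verify against your
+RHV version before use):
+engine-config -s UserDefinedVMProperties='hana=^1$' --cver=4.2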
+'''
+
+
+if 'hana' in os.environ:
+ try:
+ domxml = hooking.read_domxml()
+ domain = domxml.getElementsByTagName('domain')[0]
+ if not len(domain.getElementsByTagName('memoryBacking')):
+            sys.stderr.write('hugepages: VM is not a High Performance VM\n')
+ sys.exit(0)
+
+ if len(domain.getElementsByTagName('cpu')):
+ cpu = domain.getElementsByTagName('cpu')[0]
+ feature_tsc = domxml.createElement('feature')
+ feature_tsc.setAttribute('policy', 'require')
+ feature_tsc.setAttribute('name', 'invtsc')
+ feature_rdt = domxml.createElement('feature')
+ feature_rdt.setAttribute('policy', 'require')
+ feature_rdt.setAttribute('name', 'rdtscp')
+ feature_x2apic = domxml.createElement('feature')
+ feature_x2apic.setAttribute('policy', 'require')
+ feature_x2apic.setAttribute('name', 'x2apic')
+ feature_lvl3 = domxml.createElement('cache')
+ feature_lvl3.setAttribute('level','3')
+ feature_lvl3.setAttribute('mode','emulate')
+ cpu.appendChild(feature_tsc)
+ cpu.appendChild(feature_rdt)
+ cpu.appendChild(feature_lvl3)
+ cpu.appendChild(feature_x2apic)
+
+ if len(domain.getElementsByTagName('clock')):
+ clock = domain.getElementsByTagName('clock')[0]
+ tscClock = domxml.createElement('clock')
+ tscClock.setAttribute('offset', 'utc')
+ timer = domxml.createElement('timer')
+ timer.setAttribute('name','tsc')
+ # Uncomment and adjust for live migration (adjust frequency to match the lowest value in your cluster)
+ #timer.setAttribute('frequency','2494140000')
+ tscClock.appendChild(timer)
+ domain.removeChild(clock)
+ domain.appendChild(tscClock)
+
+ hooking.write_domxml(domxml)
+ except Exception:
+ sys.stderr.write('highperf hook: [unexpected error]: %s\n' %
+ traceback.format_exc())
+ sys.exit(2)
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/50_iothread_pinning b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/50_iothread_pinning
new file mode 100644
index 0000000..4454acf
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/50_iothread_pinning
@@ -0,0 +1,64 @@
+#!/usr/bin/python3
+
+import os
+import sys
+import traceback
+
+import hooking
+
+'''
+Syntax:
+iothread=<cpuset>, e.g. iothread=4, iothread=4-5 or iothread=4,6-7
+
+This hook will bind the iothread in RHV to the named core(s).
+A maximum of 2 cores is allowed, ideally pinned to the core (plus its
+hyperthread) that is bound to the interrupts.
+Allowed syntax is a single core, a comma-separated list, a range, or a mix thereof.
+
+Register the custom property on the RHV engine with:
+engine-config -s UserDefinedVMProperties='iothread=^[0-9,-]+$' --cver=4.2
+'''
+
+
+if 'iothread' in os.environ:
+ try:
+        iopin = os.environ['iothread']
+ domxml = hooking.read_domxml()
+ domain = domxml.getElementsByTagName('domain')[0]
+ if len(domain.getElementsByTagName('iothreads')):
+ if len(domain.getElementsByTagName('iothreadids')):
+ iothreadids = domain.getElementsByTagName('iothreadids')[0]
+ else:
+ iothreadids = domxml.createElement('iothreadids')
+ domain.appendChild(iothreadids)
+
+ if len(iothreadids.getElementsByTagName('iothread')):
+ ids = iothreadids.getElementsByTagName('iothread')[0]
+ else:
+ ids = domxml.createElement('iothread')
+ iothreadids.appendChild(ids)
+ ids.setAttribute('id', '1')
+
+ if len(domain.getElementsByTagName('cputune')):
+ cputune = domain.getElementsByTagName('cputune')[0]
+ else:
+ cputune = domxml.createElement('cputune')
+            domain.appendChild(cputune)
+
+ if len(cputune.getElementsByTagName('iothreadpin')):
+ iothreadpin = cputune.getElementsByTagName('iothreadpin')[0]
+ else:
+ iothreadpin = domxml.createElement('iothreadpin')
+ cputune.appendChild(iothreadpin)
+ iothreadpin.setAttribute('iothread', '1')
+ iothreadpin.setAttribute('cpuset', iopin)
+
+ if len(cputune.getElementsByTagName('emulatorpin')):
+ emulatorpin = cputune.getElementsByTagName('emulatorpin')[0]
+ else:
+ emulatorpin = domxml.createElement('emulatorpin')
+ cputune.appendChild(emulatorpin)
+ emulatorpin.setAttribute('cpuset', iopin)
+
+ hooking.write_domxml(domxml)
+ except Exception:
+ sys.stderr.write('iothreads hook: [unexpected error]: %s\n' %
+ traceback.format_exc())
+ sys.exit(2)
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/allocate-hugepages-at-runtime.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/allocate-hugepages-at-runtime.yml
new file mode 100644
index 0000000..a36d9fd
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/allocate-hugepages-at-runtime.yml
@@ -0,0 +1,27 @@
+---
+- name: Install libhugetlbfs
+ ansible.builtin.yum:
+ name: libhugetlbfs, libhugetlbfs-utils
+ state: present
+
+# TODO: Is there a better location than /etc/rc.local?
+- name: Add hugepage allocation to /etc/rc.local
+ ansible.builtin.blockinfile:
+ path: /etc/rc.local
+ marker: ""
+ block: |
+ hugeadm --create-mounts --pool-pages-min 1G:$(free -g | grep "Mem:" | awk '{print $2-"{{ sap_hypervisor_node_preconfigure_reserved_ram }}"}')
+
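+# Worked example for the hugeadm line above (illustrative): if free -g reports
+# 512 GB total and sap_hypervisor_node_preconfigure_reserved_ram is the
+# default of 100, the minimum 1G hugepage pool becomes 1G:412.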
+- name: Set default hugepage size
+ ansible.builtin.lineinfile:
+ path: /etc/default/grub
+ backup: true
+ backrefs: true
+ state: present
+ regexp: '^(GRUB_CMDLINE_LINUX=(?!.* {{ item }}).*). *$'
+ line: "\\1 {{ item }}\""
+ with_items:
+ - default_hugepagesz=1GB
+ - hugepagesz=1GB
+  notify: __sap_hypervisor_node_preconfigure_regenerate_grub2_conf_handler
+ tags: grubconfig
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-configuration.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-configuration.yml
new file mode 100644
index 0000000..934fdc2
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-configuration.yml
@@ -0,0 +1,136 @@
+---
+# tasks file for sap_hypervisor_node_preconfigure
+
+- name: Get kernel command line
+ ansible.builtin.command: cat /proc/cmdline
+ register: __sap_hypervisor_node_preconfigure_kernelcmdline_assert
+ changed_when: __sap_hypervisor_node_preconfigure_kernelcmdline_assert.rc != 0
+
+- name: "Assert - Kernel same page merging (KSM): Get status"
+ ansible.builtin.command: systemctl status ksm
+ register: __sap_hypervisor_node_preconfigure_ksmstatus_assert
+ ignore_errors: true
+ changed_when: __sap_hypervisor_node_preconfigure_ksmstatus_assert.rc != 0
+
+- name: "Assert - Kernel same page merging (KSM): Check if stopped"
+ ansible.builtin.assert:
+ that: "'Active: inactive (dead)' in __sap_hypervisor_node_preconfigure_ksmstatus_assert.stdout"
+ fail_msg: "FAIL: ksm is running"
+ success_msg: "PASS: ksm not running"
+ ignore_errors: "{{ sap_hypervisor_node_preconfigure_ignore_failed_assertion }}"
+
+- name: "Assert - Kernel same page merging (KSM) Tuning Daemon: Get status"
+ ansible.builtin.command: systemctl status ksmtuned
+ register: __sap_hypervisor_node_preconfigure_ksmtunedstatus_assert
+ ignore_errors: true
+ changed_when: __sap_hypervisor_node_preconfigure_ksmtunedstatus_assert.rc != 0
+
+- name: "Assert - Kernel same page merging (KSM) Tuning Daemon: Check if stopped"
+ ansible.builtin.assert:
+ that: "'Active: inactive (dead)' in __sap_hypervisor_node_preconfigure_ksmtunedstatus_assert.stdout"
+ fail_msg: "FAIL: ksmtuned is running"
+ success_msg: "PASS: ksmtuned not running"
+ ignore_errors: "{{ sap_hypervisor_node_preconfigure_ignore_failed_assertion }}"
+
+- name: Check CPU Stepping
+ ansible.builtin.shell: set -o pipefail && lscpu | awk '/Stepping/{print $2}'
+ register: __sap_hypervisor_node_preconfigure_cpu_stepping_output_assert
+ changed_when: __sap_hypervisor_node_preconfigure_cpu_stepping_output_assert.rc != 0
+
+- name: Register stepping as fact
+ ansible.builtin.set_fact:
+ __sap_hypervisor_node_preconfigure_cpu_stepping_assert: "{{ __sap_hypervisor_node_preconfigure_cpu_stepping_output_assert.stdout }}"
+
+- name: Print CPU Stepping
+ ansible.builtin.debug:
+ var: __sap_hypervisor_node_preconfigure_cpu_stepping_assert
+
+# skylake:
+- name: Assert - Check Intel Skylake CPU Platform
+ when: __sap_hypervisor_node_preconfigure_cpu_stepping_assert == "4"
+ block:
+ - name: Get ple_gap
+ ansible.builtin.command: grep -E '^options\s+kvm_intel.*?ple_gap\s*=\s*0.*$' /etc/modprobe.d/kvm.conf
+ register: __sap_hypervisor_node_preconfigure_skylake_plegap_assert
+ ignore_errors: true
+ changed_when: __sap_hypervisor_node_preconfigure_skylake_plegap_assert.rc != 0
+
+ - name: Assert - Check if ple_gap=0
+ ansible.builtin.assert:
+ that: "__sap_hypervisor_node_preconfigure_skylake_plegap_assert.rc == 0"
+ fail_msg: "FAIL: ple_gap is not set to 0"
+ success_msg: "PASS: ple_gap is set to 0"
+ ignore_errors: "{{ sap_hypervisor_node_preconfigure_ignore_failed_assertion }}"
+
+ - name: Assert - Check for spectre_v2=retpoline
+ ansible.builtin.assert:
+ that: "'spectre_v2=retpoline' in __sap_hypervisor_node_preconfigure_kernelcmdline_assert.stdout"
+ fail_msg: "FAIL: spectre_v2=retpoline is not on Kernel command line"
+ success_msg: "PASS: spectre_v2=retpoline is on Kernel command line"
+ ignore_errors: "{{ sap_hypervisor_node_preconfigure_ignore_failed_assertion }}"
+
+- name: Assert - check sap_hypervisor_node_preconfigure_nx_huge_pages
+ when: sap_hypervisor_node_preconfigure_kvm_nx_huge_pages is defined
+ block:
+ - name: Set fact for sap_hypervisor_node_preconfigure_register_assert_nx_huge_pages
+ ansible.builtin.set_fact:
+ sap_hypervisor_node_preconfigure_register_assert_nx_huge_pages: "{{ __sap_hypervisor_node_preconfigure_kernelcmdline_assert.stdout | regex_search('kvm.nx_huge_pages=(.+)', '\\1') | first }}"
+ - name: "Assert - Check kvm.nx_huge_pages is {{ sap_hypervisor_node_preconfigure_kvm_nx_huge_pages }}"
+ ansible.builtin.assert:
+ that: sap_hypervisor_node_preconfigure_register_assert_nx_huge_pages == sap_hypervisor_node_preconfigure_kvm_nx_huge_pages
+ fail_msg: "FAIL: kvm.nx_huge_pages is not {{ sap_hypervisor_node_preconfigure_kvm_nx_huge_pages }}"
+ success_msg: "PASS: kvm.nx_huge_pages is {{ sap_hypervisor_node_preconfigure_kvm_nx_huge_pages }}"
+ ignore_errors: "{{ sap_hypervisor_node_preconfigure_ignore_failed_assertion }}"
+
+- name: Assert - check seccomp_sandbox=0
+ block:
+ - name: Get seccomp setting
+ ansible.builtin.command: grep -E '^seccomp_sandbox\s+=\s+0.*$' /etc/libvirt/qemu.conf
+ register: __sap_hypervisor_node_preconfigure_seccomp_assert
+ ignore_errors: true
+ changed_when: __sap_hypervisor_node_preconfigure_seccomp_assert.rc != 0
+
+ - name: "Assert - Check seccomp_sandbox=0 is in /etc/libvirt/qemu.conf"
+ ansible.builtin.assert:
+ that: __sap_hypervisor_node_preconfigure_seccomp_assert is success
+ fail_msg: "FAIL: seccomp_sandbox != 0"
+      success_msg: "PASS: seccomp_sandbox == 0"
+ ignore_errors: "{{ sap_hypervisor_node_preconfigure_ignore_failed_assertion }}"
+
+- name: Assert - check amount of 1G hugepages
+ block:
+ - name: Get amount of 1G hugepages
+ ansible.builtin.shell: set -o pipefail && hugeadm --pool-list | grep 1073741824 | awk '{print $3}'
+ register: __sap_hypervisor_node_preconfigure_1g_hugepages_assert
+ changed_when: __sap_hypervisor_node_preconfigure_1g_hugepages_assert.rc != 0
+
+ - name: "Check that at least {{ sap_hypervisor_node_preconfigure_reserved_ram }} GB are available for the hypervisor and the rest are 1G hugepages"
+ ansible.builtin.assert:
+ that: ((ansible_memtotal_mb / 1024) | int - sap_hypervisor_node_preconfigure_reserved_ram | int) >= (__sap_hypervisor_node_preconfigure_1g_hugepages_assert.stdout | int)
+ fail_msg: "FAIL: Not enough memory reserved for hypervisor"
+ success_msg: "PASS: Enough memory reserved for hypervisor"
+ ignore_errors: "{{ sap_hypervisor_node_preconfigure_ignore_failed_assertion }}"
+
+- name: Assert - check Kernel command line
+ block:
+ - name: Ensure iommu is enabled
+ ansible.builtin.assert:
+ that: "'intel_iommu=on' in __sap_hypervisor_node_preconfigure_kernelcmdline_assert.stdout"
+ fail_msg: "FAIL: intel_iommu=on not on Kernel command line"
+ success_msg: "PASS: intel_iommu=on on Kernel command line"
+ ignore_errors: "{{ sap_hypervisor_node_preconfigure_ignore_failed_assertion }}"
+
+ - name: Ensure iommu passthrough is enabled
+ ansible.builtin.assert:
+ that: "'iommu=pt' in __sap_hypervisor_node_preconfigure_kernelcmdline_assert.stdout"
+ fail_msg: "FAIL: iommu=pt not on Kernel command line"
+ success_msg: "PASS: iommu=pt on Kernel command line"
+ ignore_errors: "{{ sap_hypervisor_node_preconfigure_ignore_failed_assertion }}"
+
+ # See SAP Note 2737837 - SAP HANA and the Intel Transactional Synchronization Extensions Capability
+ - name: Ensure tsx is on
+ ansible.builtin.assert:
+ that: "'tsx=on' in __sap_hypervisor_node_preconfigure_kernelcmdline_assert.stdout"
+ fail_msg: "FAIL: tsx=on not in Kernel command line"
+ success_msg: "PASS: tsx=on in Kernel command line"
+ ignore_errors: "{{ sap_hypervisor_node_preconfigure_ignore_failed_assertion }}"
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-installation.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-installation.yml
new file mode 100644
index 0000000..34aa301
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-installation.yml
@@ -0,0 +1,14 @@
+---
+- name: Gather package facts
+ ansible.builtin.package_facts:
+
+- name: Assert that all required packages are installed
+ ansible.builtin.assert:
+ that: line_item in ansible_facts.packages
+ fail_msg: "FAIL: Package '{{ line_item }}' is not installed!"
+ success_msg: "PASS: Package '{{ line_item }}' is installed."
+ with_items:
+ - "{{ sap_hypervisor_node_preconfigure_packages }}"
+ loop_control:
+ loop_var: line_item
+ ignore_errors: "{{ sap_hypervisor_node_preconfigure_ignore_failed_assertion }}"
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-rhv-hooks.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-rhv-hooks.yml
new file mode 100644
index 0000000..e4f40b9
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-rhv-hooks.yml
@@ -0,0 +1,37 @@
+---
+- name: Check file permissions
+ ansible.builtin.command: "stat -c%a /usr/libexec/vdsm/hooks/before_vm_start/{{ item }}"
+ register: __sap_hypervisor_node_preconfigure_register_file_permissions_assert
+ changed_when: __sap_hypervisor_node_preconfigure_register_file_permissions_assert.rc != 0
+
+- name: Assert hook file permissions
+ ansible.builtin.assert:
+ that: "__sap_hypervisor_node_preconfigure_register_file_permissions_assert.stdout == '755'"
+ fail_msg: "FAIL: Hook {{ item }} does not have the correct file permissions (!= 755)."
+ success_msg: "PASS: Hook {{ item }} does have the correct file permissions (755)."
+ ignore_errors: "{{ sap_hypervisor_node_preconfigure_ignore_failed_assertion }}"
+
+- name: Create tmp dir
+ ansible.builtin.file:
+ path: /tmp/sap_hypervisor_node_preconfigure
+ state: directory
+ mode: "0755"
+
+- name: Copy hook for checking
+ ansible.builtin.copy:
+ dest: "/tmp/sap_hypervisor_node_preconfigure/{{ item }}"
+ src: "{{ item }}"
+ mode: "0755"
+
+- name: Diff hook
+ ansible.builtin.command: "diff -uw /tmp/sap_hypervisor_node_preconfigure/{{ item }} /usr/libexec/vdsm/hooks/before_vm_start/{{ item }}"
+ register: __sap_hypervisor_node_preconfigure_register_hook_diff_assert
+ ignore_errors: true
+ changed_when: __sap_hypervisor_node_preconfigure_register_hook_diff_assert.rc != 0
+
+- name: Assert hook content
+ ansible.builtin.assert:
+ that: "__sap_hypervisor_node_preconfigure_register_hook_diff_assert.rc == 0"
+ fail_msg: "FAIL: Hook {{ item }} has been modified, please investigate manually."
+ success_msg: "PASS: Hook {{ item }} not modified"
+ ignore_errors: "{{ sap_hypervisor_node_preconfigure_ignore_failed_assertion }}"
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-set-tuned-profile.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-set-tuned-profile.yml
new file mode 100644
index 0000000..ab0d0c9
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/assert-set-tuned-profile.yml
@@ -0,0 +1,14 @@
+---
+- name: Assert - Check tuned profile
+ block:
+ - name: Get tuned profile
+ ansible.builtin.command: tuned-adm active
+ register: __sap_hypervisor_node_preconfigure_tuned_profile_assert
+ changed_when: __sap_hypervisor_node_preconfigure_tuned_profile_assert.rc != 0
+
+ - name: Verify tuned profile
+ ansible.builtin.assert:
+ that: "'Current active profile: sap-hana-kvm-host' in __sap_hypervisor_node_preconfigure_tuned_profile_assert.stdout"
+ fail_msg: "FAIL: tuned profile is not sap-hana-kvm-host"
+ success_msg: "PASS: tuned profile is sap-hana-kvm-host"
+ ignore_errors: "{{ sap_hypervisor_node_preconfigure_ignore_failed_assertion }}"
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/configuration.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/configuration.yml
new file mode 100644
index 0000000..f647e2f
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/configuration.yml
@@ -0,0 +1,132 @@
+---
+# tasks file for sap_hypervisor_node_preconfigure
+
+- name: Stop and disable kernel same page merging (KSM)
+ ansible.builtin.systemd:
+ name: ksm
+ state: stopped
+ enabled: false
+
+- name: Stop and disable kernel same page merging (KSM) tuning daemon
+ ansible.builtin.systemd:
+ name: ksmtuned
+ state: stopped
+ enabled: false
+
+- name: Check CPU Stepping
+ ansible.builtin.shell: set -o pipefail && lscpu | awk '/Stepping/{print $2}'
+  register: __sap_hypervisor_node_preconfigure_register_cpu_stepping_output
+  changed_when: __sap_hypervisor_node_preconfigure_register_cpu_stepping_output.rc != 0
+
+- name: Register CPU stepping as fact
+ ansible.builtin.set_fact:
+    __sap_hypervisor_node_preconfigure_cpu_stepping: "{{ __sap_hypervisor_node_preconfigure_register_cpu_stepping_output.stdout }}"
+ become: true
+ become_user: root
+
+# skylake:
+- name: Set ple_gap=0 on Intel Skylake CPU Platform
+ ansible.builtin.lineinfile:
+ path: /etc/modprobe.d/kvm.conf
+ line: options kvm_intel ple_gap=0
+  when: __sap_hypervisor_node_preconfigure_cpu_stepping == "4"
+ become: true
+ become_user: root
+
+# skylake
+- name: Set spectre_v2=retpoline on Intel Skylake CPU Platform
+ ansible.builtin.lineinfile:
+ path: /etc/default/grub
+ backup: true
+ backrefs: true
+ state: present
+ regexp: '^(GRUB_CMDLINE_LINUX=(?!.* {{ item }}).*). *$'
+ line: "\\1 {{ item }}\""
+ with_items:
+ - "spectre_v2=retpoline"
+ notify: __sap_hypervisor_node_preconfigure_regenerate_grub2_conf_handler
+ tags: grubconfig
+  when: __sap_hypervisor_node_preconfigure_cpu_stepping == "4"
+ become: true
+ become_user: root
+
+- name: "Set kvm.nx_huge_pages to {{ sap_hypervisor_node_preconfigure_kvm_nx_huge_pages }}"
+ ansible.builtin.lineinfile:
+ path: /etc/default/grub
+ backup: true
+ backrefs: true
+ state: present
+ regexp: '^(GRUB_CMDLINE_LINUX=(?!.* {{ item }}).*). *$'
+ line: "\\1 {{ item }}\""
+ with_items:
+ - "kvm.nx_huge_pages={{ sap_hypervisor_node_preconfigure_kvm_nx_huge_pages }}"
+ notify: __sap_hypervisor_node_preconfigure_regenerate_grub2_conf_handler
+ tags: grubconfig
+ when: sap_hypervisor_node_preconfigure_kvm_nx_huge_pages is defined
+ become: true
+ become_user: root
+
+- name: Set seccomp_sandbox=0
+ ansible.builtin.lineinfile:
+ path: /etc/libvirt/qemu.conf
+ backup: true
+ backrefs: true
+ state: present
+ regexp: 'seccomp_sandbox'
+ line: "seccomp_sandbox = 0"
+ become: true
+ become_user: root
+
+- name: Include allocate hugepages at runtime
+ ansible.builtin.include_tasks: allocate-hugepages-at-runtime.yml
+ when: sap_hypervisor_node_preconfigure_reserve_hugepages == "runtime"
+
+- name: Reserve Hugepages statically
+ ansible.builtin.lineinfile:
+ path: /etc/default/grub
+ backup: true
+ backrefs: true
+ state: present
+ regexp: '^(GRUB_CMDLINE_LINUX=(?!.* {{ item }}).*). *$'
+ line: "\\1 {{ item }}\""
+ with_items:
+ - default_hugepagesz=1GB
+ - hugepagesz=1GB
+ - hugepages={{ (ansible_memtotal_mb / 1024) | int - sap_hypervisor_node_preconfigure_reserved_ram }}
+ notify: __sap_hypervisor_node_preconfigure_regenerate_grub2_conf_handler
+ tags: grubconfig
+ when: sap_hypervisor_node_preconfigure_reserve_hugepages == "static"
+ become: true
+ become_user: root
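+# Worked example for the static reservation above (illustrative): on a host
+# with 1024 GiB of memory and the default sap_hypervisor_node_preconfigure_reserved_ram
+# of 100, this renders hugepages=924, i.e. 924 pages of 1 GiB each.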
+
+- name: Enable IOMMU PT
+ ansible.builtin.lineinfile:
+ path: /etc/default/grub
+ backup: true
+ backrefs: true
+ state: present
+ regexp: '^(GRUB_CMDLINE_LINUX=(?!.* {{ item }}).*). *$'
+ line: "\\1 {{ item }}\""
+ with_items:
+ - intel_iommu=on
+ - iommu=pt
+ notify: __sap_hypervisor_node_preconfigure_regenerate_grub2_conf_handler
+ tags: grubconfig
+ become: true
+ become_user: root
+
+# See SAP Note 2737837 - SAP HANA and the Intel Transactional Synchronization Extensions Capability
+- name: Enable TSX
+ ansible.builtin.lineinfile:
+ path: /etc/default/grub
+ backup: true
+ backrefs: true
+ state: present
+ regexp: '^(GRUB_CMDLINE_LINUX=(?!.* {{ item }}).*). *$'
+ line: "\\1 {{ item }}\""
+ with_items:
+ - "tsx={{ sap_hypervisor_node_preconfigure_tsx }}"
+ notify: __sap_hypervisor_node_preconfigure_regenerate_grub2_conf_handler
+ tags: grubconfig
+ become: true
+ become_user: root
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/installation.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/installation.yml
new file mode 100644
index 0000000..c38cf3a
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/installation.yml
@@ -0,0 +1,7 @@
+---
+- name: Ensure required packages are installed
+ ansible.builtin.package:
+ state: present
+ name: "{{ sap_hypervisor_node_preconfigure_packages }}"
+ become: true
+ become_user: root
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/main.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/main.yml
new file mode 100644
index 0000000..dd405a4
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/main.yml
@@ -0,0 +1,28 @@
+---
+- name: Display sap_hypervisor_node_preconfigure_assert
+ ansible.builtin.debug:
+ var: sap_hypervisor_node_preconfigure_assert
+
+- name: Set filename prefix to empty string if role is run in normal mode
+ ansible.builtin.set_fact:
+ assert_prefix: ""
+
+- name: Prepend filename with assert string if role is run in assert mode
+ ansible.builtin.set_fact:
+ assert_prefix: "assert-"
+ when: sap_hypervisor_node_preconfigure_assert
+
+- name: Include "{{ assert_prefix }}installation.yml"
+ ansible.builtin.include_tasks: '{{ assert_prefix }}installation.yml'
+
+- name: Include "{{ assert_prefix }}configuration.yml"
+ ansible.builtin.include_tasks: '{{ assert_prefix }}configuration.yml'
+
+- name: Include "{{ assert_prefix }}set-tuned-profile.yml"
+ ansible.builtin.include_tasks: '{{ assert_prefix }}set-tuned-profile.yml'
+
+- name: Include "{{ assert_prefix }}rhv-hooks.yml"
+ ansible.builtin.include_tasks: "{{ assert_prefix }}rhv-hooks.yml"
+ loop:
+ - 50_hana
+ - 50_iothread_pinning
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/rhv-hooks.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/rhv-hooks.yml
new file mode 100644
index 0000000..ee0d63a
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/rhv-hooks.yml
@@ -0,0 +1,16 @@
+---
+- name: Create hook dir
+ ansible.builtin.file:
+ path: /usr/libexec/vdsm/hooks/before_vm_start
+ state: directory
+ mode: "0755"
+ become: true
+ become_user: root
+
+- name: Copy hook
+ ansible.builtin.copy:
+ dest: "/usr/libexec/vdsm/hooks/before_vm_start/{{ item }}"
+ src: "{{ item }}"
+ mode: "0755"
+ become: true
+ become_user: root
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/set-tuned-profile.yml b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/set-tuned-profile.yml
new file mode 100644
index 0000000..91c3d77
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/tasks/platform/redhat_rhel_kvm/set-tuned-profile.yml
@@ -0,0 +1,44 @@
+---
+- name: Create tuned profile directory /usr/lib/tuned/sap-hana-kvm-host
+ ansible.builtin.file:
+ path: /usr/lib/tuned/sap-hana-kvm-host
+ state: directory
+ mode: "0755"
+ become: true
+ become_user: root
+
+- name: Create sap-hana-kvm-host tuned profile
+ ansible.builtin.copy:
+ dest: "/usr/lib/tuned/sap-hana-kvm-host/tuned.conf"
+ mode: "0644"
+ content: |
+ #
+ # tuned configuration
+ #
+ [main]
+ summary=Optimize for running as KVM host for SAP HANA as virtual guest
+ include=throughput-performance
+
+ [sysctl]
+ # Start background writeback (via writeback threads) at this percentage (system
+ # default is 10%)
+ vm.dirty_background_ratio = 5
+
+ # The total time the scheduler will consider a migrated process
+ # "cache hot" and thus less likely to be re-migrated
+ # (system default is 500000, i.e. 0.5 ms)
+ kernel.sched_migration_cost_ns = 5000000
+
+ [cpu]
+ # Setting C3 state sleep mode/power savings on X86
+ # and force_latency=70 on Power
+ force_latency=cstate.id:3|70
+ become: true
+ become_user: root
+
+- name: Activate tuned profile
+ ansible.builtin.command: tuned-adm profile sap-hana-kvm-host
+  register: __sap_hypervisor_node_preconfigure_register_tuned_activation_output
+  become: true
+  become_user: root
+  changed_when: __sap_hypervisor_node_preconfigure_register_tuned_activation_output.rc != 0
diff --git a/roles/sap_hypervisor_node_preconfigure/tasks/platform/vmware_vsphere/.gitkeep b/roles/sap_hypervisor_node_preconfigure/tasks/platform/vmware_vsphere/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_hypervisor_node_preconfigure/vars/main.yml b/roles/sap_hypervisor_node_preconfigure/vars/main.yml
new file mode 100644
index 0000000..ed97d53
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/vars/main.yml
@@ -0,0 +1 @@
+---
diff --git a/roles/sap_hypervisor_node_preconfigure/vars/platform_defaults_redhat_ocp_virt.yml b/roles/sap_hypervisor_node_preconfigure/vars/platform_defaults_redhat_ocp_virt.yml
new file mode 100644
index 0000000..6bfd827
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/vars/platform_defaults_redhat_ocp_virt.yml
@@ -0,0 +1,30 @@
+---
+
+# vars file for redhat_ocp_virt
+
+# Install and configure the host path provisioner (hpp) for a local storage disk
+sap_hypervisor_node_preconfigure_install_hpp: false
+
+# Install the trident NFS storage provider
+sap_hypervisor_node_preconfigure_install_trident: false
+
+# URL of the trident installer package to use
+sap_hypervisor_node_preconfigure_install_trident_url: https://github.com/NetApp/trident/releases/download/v23.01.0/trident-installer-23.01.0.tar.gz
+
+# should SRIOV be enabled for unsupported NICs
+sap_hypervisor_node_preconfigure_sriov_enable_unsupported_nics: true
+
+# Amount of memory [GiB] to be reserved for the hypervisor on hosts >= 512GiB
+sap_hypervisor_node_preconfigure_hypervisor_reserved_ram_host_ge_512: 64 # GiB
+
+# Amount of memory [GiB] to be reserved for the hypervisor on hosts < 512GiB
+sap_hypervisor_node_preconfigure_hypervisor_reserved_ram_host_lt_512: 32 # GiB
+
+# Should the check for the minimal amount of memory be ignored? The minimal amount is 96 GiB.
+sap_hypervisor_node_preconfigure_ignore_minimal_memory_check: false
+
+# Should the operators be installed
+sap_hypervisor_node_preconfigure_install_operators: true
+
+# Configure the workers?
+sap_hypervisor_node_preconfigure_setup_workers: true
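+
+# Illustrative sketch of the cluster description consumed by this role
+# (an assumption reconstructed from how the tasks reference the variable;
+# see defaults/main.yml and the role README for the authoritative structure):
+# sap_hypervisor_node_preconfigure_cluster_config:
+#   vm_namespace: sap
+#   worker_kubernetes_reserved_cpus: "0,1"
+#   workers:
+#     - name: worker-0
+#   trident:                     # only used when sap_hypervisor_node_preconfigure_install_trident is true
+#     management: 192.168.1.1
+#     data: 192.168.1.2
+#     svm: sap_svm
+#     backend: nas_backend
+#     aggregate: aggr1
+#     username: admin
+#     password: xxxxx
+#     storage_driver: ontap-nas
+#     storage_prefix: ocpv_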
diff --git a/roles/sap_hypervisor_node_preconfigure/vars/platform_defaults_redhat_rhel_kvm.yml b/roles/sap_hypervisor_node_preconfigure/vars/platform_defaults_redhat_rhel_kvm.yml
new file mode 100644
index 0000000..92d1f2f
--- /dev/null
+++ b/roles/sap_hypervisor_node_preconfigure/vars/platform_defaults_redhat_rhel_kvm.yml
@@ -0,0 +1,39 @@
+---
+
+# vars file for redhat_rhel_kvm
+
+# packages to install
+sap_hypervisor_node_preconfigure_packages:
+ - libhugetlbfs-utils
+ - qemu-kvm
+ - libvirt
+ - tuned
+
+# is it okay to reboot host?
+sap_hypervisor_node_preconfigure_reboot_ok: true
+
+# should the role fail if a reboot is required
+sap_hypervisor_node_preconfigure_fail_if_reboot_required: false
+
+# recreate kernel command line with grub2-mkconfig
+sap_hypervisor_node_preconfigure_run_grub2_mkconfig: true
+
+# Reserve memory [GB] for hypervisor host
+sap_hypervisor_node_preconfigure_reserved_ram: 100
+
+# allocate hugepages: {static|runtime}
+# static: done at kernel command line which is slow, but safe
+# runtime: allocated after boot via hugeadm in /etc/rc.local (see allocate-hugepages-at-runtime.yml)
+sap_hypervisor_node_preconfigure_reserve_hugepages: static
+
+# kvm.nx_huge_pages: {"auto"|"on"|"off"}
+# Note the importance of the quotes, otherwise off will be mapped to false
+sap_hypervisor_node_preconfigure_kvm_nx_huge_pages: "auto"
+
+# Intel Transactional Synchronization Extensions (TSX): {"on"|"off"}
+# Note the importance of the quotes, otherwise off will be mapped to false
+# See SAP Note 2737837 - SAP HANA and the Intel Transactional Synchronization Extensions Capability
+sap_hypervisor_node_preconfigure_tsx: "on"
+
+# fail if assertion is invalid
+sap_hypervisor_node_preconfigure_ignore_failed_assertion: false
diff --git a/roles/sap_vm_preconfigure/.ansible-lint b/roles/sap_vm_preconfigure/.ansible-lint
new file mode 100644
index 0000000..8a5df4d
--- /dev/null
+++ b/roles/sap_vm_preconfigure/.ansible-lint
@@ -0,0 +1,16 @@
+---
+exclude_paths:
+ - tests/
+enable_list:
+ - yaml
+skip_list:
+ # We don't want to enforce new Ansible versions for Galaxy:
+ - meta-runtime[unsupported-version]
+ # We do not want to use checks which are marked as experimental:
+ - experimental
+ # We use ignore_errors for all the assert tasks, which should be acceptable:
+ - ignore-errors
+ # We want to allow single digit version numbers in a role's meta/main.yml file:
+ - schema
+ # Allow templating inside name because it creates more detailed output:
+ - name[template]
diff --git a/roles/sap_vm_preconfigure/.yamllint.yml b/roles/sap_vm_preconfigure/.yamllint.yml
new file mode 100644
index 0000000..57ef427
--- /dev/null
+++ b/roles/sap_vm_preconfigure/.yamllint.yml
@@ -0,0 +1,21 @@
+---
+# Based on ansible-lint config
+extends: default
+
+rules:
+ braces: {max-spaces-inside: 1, level: error}
+ brackets: {max-spaces-inside: 1, level: error}
+# colons: {max-spaces-after: -1, level: error}
+# commas: {max-spaces-after: -1, level: error}
+ comments: disable
+ comments-indentation: disable
+# document-start: disable
+# empty-lines: {max: 3, level: error}
+# hyphens: {level: error}
+# indentation: disable
+# key-duplicates: enable
+ line-length: disable
+# new-line-at-end-of-file: disable
+# new-lines: {type: unix}
+# trailing-spaces: disable
+ truthy: disable
diff --git a/roles/sap_vm_preconfigure/README.md b/roles/sap_vm_preconfigure/README.md
new file mode 100644
index 0000000..03289ca
--- /dev/null
+++ b/roles/sap_vm_preconfigure/README.md
@@ -0,0 +1,97 @@
+`WIP`
+
+# sap_vm_preconfigure
+
+Ansible Role for Vendor-specific configuration preparation tasks for Virtual Machines running SAP Systems.
+
+This Ansible Role will configure Virtual Machines on the following Infrastructure Platforms in order to run SAP workloads:
+- Red Hat Enterprise Virtualization (RHV), i.e. oVirt KVM
+
+
+## Functionality
+
+Detect current Infrastructure Platform and execute tasks specified by the vendor.
+
+
+## Scope
+
+All hosts for SAP Software.
+
+
+## Requirements
+
+### Target hosts
+
+**OS Versions:**
+- Red Hat Enterprise Linux 8.2+
+- SUSE Linux Enterprise Server 15 SP3+
+
+### Execution/Controller host
+
+**Dependencies:**
+- OS Packages
+ - Python 3.9.7+ (i.e. CPython distribution)
+- Python Packages
+ - None
+- Ansible
+ - Ansible Core 2.12.0+
+ - Ansible Collections:
+ - None
+
+
+## Execution
+
+### Sample execution
+
+For further information, see the [sample Ansible Playbooks in `/playbooks`](../playbooks/).
+
+### Suggested execution sequence
+
+Prior to execution of this Ansible Role, it is advised to first execute:
+- sap_general_preconfigure
+- sap_netweaver_preconfigure / sap_hana_preconfigure
+
+### Summary of execution flow
+
+- Detect Platform (or specify)
+- Execute tasks defined by Infrastructure Platform vendor
+
+### Tags to control execution
+
+There are no tags used to control the execution of this Ansible Role.
+
+
+## License
+
+Apache 2.0
+
+
+## Authors
+
+TBD
+
+---
+
+## Ansible Role Input Variables
+
+Please first check the [/defaults parameters file](./defaults/main.yml), and platform specific parameters within [/vars parameters file](./vars/) path.
+
+
+### Run the role in assert mode
+
+```yaml
+sap_vm_preconfigure_assert (default: no)
+```
+
+If this variable is set to `yes`, the role will only check whether the configuration of the managed machines matches the settings of this role. Default is `no`.
+
+
+### Behavior of the role in assert mode
+
+```yaml
+sap_vm_preconfigure_assert_ignore_errors (default: no)
+```
+
+If the role is run in assert mode and the following variable is set to `yes`, assertion errors will not cause the role to fail. This can be useful for creating reports.
+
+Default is `no`, meaning that the role will fail for any assertion error which is discovered. This variable has no meaning if the role is not run in assert mode.
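+
+### Example: running the role in assert mode
+
+A minimal sketch (assumptions: the host group and the statically set platform value are illustrative):
+
+```yaml
+---
+- hosts: all
+  become: true
+  tasks:
+
+    - name: Check the VM configuration without changing the system
+      ansible.builtin.include_role:
+        name: sap_vm_preconfigure
+      vars:
+        sap_vm_platform: hyp_redhat_rhel_kvm_vm
+        sap_vm_preconfigure_assert: yes
+        sap_vm_preconfigure_assert_ignore_errors: yes
+```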
diff --git a/roles/sap_vm_preconfigure/defaults/main.yml b/roles/sap_vm_preconfigure/defaults/main.yml
new file mode 100644
index 0000000..45de6c6
--- /dev/null
+++ b/roles/sap_vm_preconfigure/defaults/main.yml
@@ -0,0 +1,11 @@
+---
+
+# For setting Ansible Var sap_vm_platform, unless overridden
+sap_vm_platform_detect: false
+
+# For reuse of this Ansible Role to detect the platform only, without running any of the preconfigure Ansible Tasks for that platform
+sap_vm_platform_detect_only: false
+
+# Static definition, required if detection boolean is set to false
+# cloud_aliyun_ecs_vm, cloud_aws_ec2_vs, cloud_gcp_ce_vm, cloud_ibmcloud_powervs, cloud_ibmcloud_vs, cloud_msazure_vm, hyp_ibmpower_lpar, hyp_redhat_ocp_virt_vm, hyp_redhat_rhel_kvm_vm, hyp_vmware_vsphere_vm
+sap_vm_platform:
diff --git a/roles/sap_vm_preconfigure/files/platform/cloud_aliyun_ecs_vm/.gitkeep b/roles/sap_vm_preconfigure/files/platform/cloud_aliyun_ecs_vm/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_preconfigure/files/platform/cloud_aws_ec2_vs/.gitkeep b/roles/sap_vm_preconfigure/files/platform/cloud_aws_ec2_vs/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_preconfigure/files/platform/cloud_gcp_ce_vm/.gitkeep b/roles/sap_vm_preconfigure/files/platform/cloud_gcp_ce_vm/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_preconfigure/files/platform/cloud_ibmcloud_powervs/.gitkeep b/roles/sap_vm_preconfigure/files/platform/cloud_ibmcloud_powervs/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_preconfigure/files/platform/cloud_ibmcloud_vs/.gitkeep b/roles/sap_vm_preconfigure/files/platform/cloud_ibmcloud_vs/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_preconfigure/files/platform/cloud_msazure_vm/.gitkeep b/roles/sap_vm_preconfigure/files/platform/cloud_msazure_vm/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_preconfigure/files/platform/ibmpower_lpar/.gitkeep b/roles/sap_vm_preconfigure/files/platform/ibmpower_lpar/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_preconfigure/files/platform/redhat_ocp_virt_vm/.gitkeep b/roles/sap_vm_preconfigure/files/platform/redhat_ocp_virt_vm/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_preconfigure/files/platform/redhat_rhel_kvm_vm/tuned/sap-hana-kvm-guest/haltpoll.sh b/roles/sap_vm_preconfigure/files/platform/redhat_rhel_kvm_vm/tuned/sap-hana-kvm-guest/haltpoll.sh
new file mode 100644
index 0000000..e2a14ed
--- /dev/null
+++ b/roles/sap_vm_preconfigure/files/platform/redhat_rhel_kvm_vm/tuned/sap-hana-kvm-guest/haltpoll.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+if [ "$1" == "start" ]; then
+ modprobe cpuidle-haltpoll force
+fi
+
+## Question: Does this also need another "if" checking to see if $1 is "stop" to unload the module?
diff --git a/roles/sap_vm_preconfigure/files/platform/redhat_rhel_kvm_vm/tuned/sap-hana-kvm-guest/tuned.conf b/roles/sap_vm_preconfigure/files/platform/redhat_rhel_kvm_vm/tuned/sap-hana-kvm-guest/tuned.conf
new file mode 100644
index 0000000..82ad6af
--- /dev/null
+++ b/roles/sap_vm_preconfigure/files/platform/redhat_rhel_kvm_vm/tuned/sap-hana-kvm-guest/tuned.conf
@@ -0,0 +1,24 @@
+#
+# tuned configuration
+#
+[main]
+summary=Optimize for running SAP HANA on KVM inside a virtual guest
+include=sap-hana
+
+[haltpoll]
+type=script
+script=${i:PROFILE_DIR}/haltpoll.sh
+
+[sysfs]
+/sys/devices/system/clocksource/clocksource0/current_clocksource=tsc
+/sys/module/haltpoll/parameters/guest_halt_poll_ns=2400000
+/sys/module/haltpoll/parameters/guest_halt_poll_grow_start=2400000
+
+[sysctl]
+kernel.sched_latency_ns=12000000
+kernel.sched_migration_cost_ns=500000
+kernel.sched_min_granularity_ns=12000000
+kernel.sched_wakeup_granularity_ns=15000000
+
+[bootloader]
+cmdline_saphana=skew_tick=1
diff --git a/roles/sap_vm_preconfigure/files/platform/redhat_rhel_kvm_vm/tuned/sap-hana/tuned.conf b/roles/sap_vm_preconfigure/files/platform/redhat_rhel_kvm_vm/tuned/sap-hana/tuned.conf
new file mode 100644
index 0000000..ba688f9
--- /dev/null
+++ b/roles/sap_vm_preconfigure/files/platform/redhat_rhel_kvm_vm/tuned/sap-hana/tuned.conf
@@ -0,0 +1,24 @@
+#
+# tuned configuration
+#
+[main]
+summary=Optimize for SAP HANA
+
+[cpu]
+force_latency=cstate.id:3|70
+governor=performance
+energy_perf_bias=performance
+min_perf_pct=100
+
+[vm]
+transparent_hugepages=never
+
+[sysctl]
+kernel.sem = 32000 1024000000 500 32000
+kernel.numa_balancing = 0
+kernel.sched_min_granularity_ns = 3000000
+kernel.sched_wakeup_granularity_ns = 4000000
+vm.dirty_ratio = 40
+vm.dirty_background_ratio = 10
+vm.swappiness = 10
+
diff --git a/roles/sap_vm_preconfigure/files/platform/vmware_vsphere_vm/.gitkeep b/roles/sap_vm_preconfigure/files/platform/vmware_vsphere_vm/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_preconfigure/handlers/main.yml b/roles/sap_vm_preconfigure/handlers/main.yml
new file mode 100644
index 0000000..c3d3be1
--- /dev/null
+++ b/roles/sap_vm_preconfigure/handlers/main.yml
@@ -0,0 +1,4 @@
+---
+
+- name: SAP virtual machine preconfigure - Include Handler Tasks for {{ sap_vm_platform }}
+ ansible.builtin.include_tasks: "{{ role_path }}/handlers/platform/{{ sap_vm_platform }}/main.yml"
diff --git a/roles/sap_vm_preconfigure/handlers/platform/cloud_aliyun_ecs_vm/.gitkeep b/roles/sap_vm_preconfigure/handlers/platform/cloud_aliyun_ecs_vm/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_preconfigure/handlers/platform/cloud_aws_ec2_vs/.gitkeep b/roles/sap_vm_preconfigure/handlers/platform/cloud_aws_ec2_vs/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_preconfigure/handlers/platform/cloud_gcp_ce_vm/.gitkeep b/roles/sap_vm_preconfigure/handlers/platform/cloud_gcp_ce_vm/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_preconfigure/handlers/platform/cloud_ibmcloud_powervs/.gitkeep b/roles/sap_vm_preconfigure/handlers/platform/cloud_ibmcloud_powervs/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_preconfigure/handlers/platform/cloud_ibmcloud_vs/.gitkeep b/roles/sap_vm_preconfigure/handlers/platform/cloud_ibmcloud_vs/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_preconfigure/handlers/platform/cloud_msazure_vm/.gitkeep b/roles/sap_vm_preconfigure/handlers/platform/cloud_msazure_vm/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_preconfigure/handlers/platform/ibmpower_lpar/.gitkeep b/roles/sap_vm_preconfigure/handlers/platform/ibmpower_lpar/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_preconfigure/handlers/platform/redhat_ocp_virt_vm/.gitkeep b/roles/sap_vm_preconfigure/handlers/platform/redhat_ocp_virt_vm/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_preconfigure/handlers/platform/redhat_rhel_kvm_vm/main.yml b/roles/sap_vm_preconfigure/handlers/platform/redhat_rhel_kvm_vm/main.yml
new file mode 100644
index 0000000..4a89b9f
--- /dev/null
+++ b/roles/sap_vm_preconfigure/handlers/platform/redhat_rhel_kvm_vm/main.yml
@@ -0,0 +1,94 @@
+---
+
+- name: "Check if server is booted in BIOS or UEFI mode"
+  ansible.builtin.stat:
+ path: /sys/firmware/efi
+    get_checksum: false
+ register: __sap_vm_preconfigure_register_stat_sys_firmware_efi
+ listen: __sap_vm_preconfigure_regenerate_grub2_conf_handler
+ when:
+ - sap_vm_preconfigure_run_grub2_mkconfig|d(true)
+
+- name: Debug BIOS or UEFI
+  ansible.builtin.debug:
+ var: __sap_vm_preconfigure_register_stat_sys_firmware_efi.stat.exists
+ listen: __sap_vm_preconfigure_regenerate_grub2_conf_handler
+ when:
+ - sap_vm_preconfigure_run_grub2_mkconfig|d(true)
+
+- name: "Run grub-mkconfig (BIOS mode)"
+  ansible.builtin.command: grub2-mkconfig -o /boot/grub2/grub.cfg
+ register: __sap_vm_preconfigure_register_grub2_mkconfig_bios_mode
+ listen: __sap_vm_preconfigure_regenerate_grub2_conf_handler
+ notify: __sap_vm_preconfigure_reboot_handler
+ when:
+ - not __sap_vm_preconfigure_register_stat_sys_firmware_efi.stat.exists
+ - sap_vm_preconfigure_run_grub2_mkconfig|d(true)
+
+- name: "Debug grub-mkconfig BIOS mode"
+  ansible.builtin.debug:
+ var: __sap_vm_preconfigure_register_grub2_mkconfig_bios_mode.stdout_lines,
+ __sap_vm_preconfigure_register_grub2_mkconfig_bios_mode.stderr_lines
+ listen: __sap_vm_preconfigure_regenerate_grub2_conf_handler
+ when:
+ - not __sap_vm_preconfigure_register_stat_sys_firmware_efi.stat.exists
+ - sap_vm_preconfigure_run_grub2_mkconfig|d(true)
+
+- name: "Set the grub.cfg location RHEL"
+  ansible.builtin.set_fact:
+ __sap_vm_preconfigure_uefi_boot_dir: /boot/efi/EFI/redhat/grub.cfg
+ when:
+ - ansible_distribution == 'RedHat'
+
+- name: "Set the grub.cfg location SLES"
+  ansible.builtin.set_fact:
+ __sap_vm_preconfigure_uefi_boot_dir: /boot/efi/EFI/BOOT/grub.cfg
+ when:
+ - ansible_distribution == 'SLES' or ansible_distribution == 'SLES_SAP'
+
+- name: "Run grub-mkconfig (UEFI mode)"
+ command: "grub2-mkconfig -o {{ __sap_vm_preconfigure_uefi_boot_dir }}"
+ register: __sap_vm_preconfigure_register_grub2_mkconfig_uefi_mode
+ listen: __sap_vm_preconfigure_regenerate_grub2_conf_handler
+ notify: __sap_vm_preconfigure_reboot_handler
+ when:
+ - __sap_vm_preconfigure_register_stat_sys_firmware_efi.stat.exists
+ - sap_vm_preconfigure_run_grub2_mkconfig|d(true)
+
+- name: "Debug grub-mkconfig UEFI"
+ debug:
+ var: __sap_vm_preconfigure_register_grub2_mkconfig_uefi_mode.stdout_lines,
+ __sap_vm_preconfigure_register_grub2_mkconfig_uefi_mode.stderr_lines
+ listen: __sap_vm_preconfigure_regenerate_grub2_conf_handler
+ when:
+ - __sap_vm_preconfigure_register_stat_sys_firmware_efi.stat.exists
+ - sap_vm_preconfigure_run_grub2_mkconfig|d(true)
+
+- name: "Run grubby for enabling TSX"
+  ansible.builtin.command: grubby --args="tsx=on" --update-kernel=ALL
+  register: __sap_vm_preconfigure_register_grubby_update
+  changed_when: true
+ listen: __sap_vm_preconfigure_grubby_update_handler
+ notify: __sap_vm_preconfigure_reboot_handler
+
+- name: Reboot the managed node
+  ansible.builtin.reboot:
+ test_command: /bin/true
+ listen: __sap_vm_preconfigure_reboot_handler
+ when:
+ - sap_vm_preconfigure_reboot_ok|d(false)
+
+- name: Let the role fail if a reboot is required
+  ansible.builtin.fail:
+ msg: Reboot is required!
+ listen: __sap_vm_preconfigure_reboot_handler
+ when:
+ - sap_vm_preconfigure_fail_if_reboot_required|d(true)
+ - not sap_vm_preconfigure_reboot_ok|d(false)
+
+- name: Show a warning message if a reboot is required
+  ansible.builtin.debug:
+ msg: "WARN: Reboot is required!"
+ listen: __sap_vm_preconfigure_reboot_handler
+ when:
+ - not sap_vm_preconfigure_fail_if_reboot_required|d(true)
+ - not sap_vm_preconfigure_reboot_ok|d(false)
diff --git a/roles/sap_vm_preconfigure/handlers/platform/vmware_vsphere_vm/.gitkeep b/roles/sap_vm_preconfigure/handlers/platform/vmware_vsphere_vm/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_preconfigure/meta/main.yml b/roles/sap_vm_preconfigure/meta/main.yml
new file mode 100644
index 0000000..0b7feee
--- /dev/null
+++ b/roles/sap_vm_preconfigure/meta/main.yml
@@ -0,0 +1,10 @@
+---
+galaxy_info:
+ namespace: community
+ role_name: sap_vm_preconfigure
+ author: Nils Koenig
+  description: Configure Virtual Machines running on SAP-certified Cloud IaaS or SAP-certified Hypervisors
+ license: Apache-2.0
+ min_ansible_version: 2.9
+ galaxy_tags: ['sap', 'hana', 'rhel', 'redhat', 'sles', 'suse']
+dependencies: []
diff --git a/roles/sap_vm_preconfigure/tasks/detect_platform/main.yml b/roles/sap_vm_preconfigure/tasks/detect_platform/main.yml
new file mode 100644
index 0000000..ee7161c
--- /dev/null
+++ b/roles/sap_vm_preconfigure/tasks/detect_platform/main.yml
@@ -0,0 +1,131 @@
+---
+# Platform detection for cloud and other infrastructure platforms.
+
+### Facts already available to Ansible
+#
+### Amazon Web Services EC2 Virtual Server. Not applicable for AWS Classic.
+# ansible_chassis_asset_tag: "Amazon EC2" # SMBIOS Chassis Asset Tag
+# ansible_board_asset_tag: "i-043d3c1a889ed9016" # SMBIOS Baseboard Asset Tag, ID of virtual machine on platform
+# ansible_chassis_vendor: "Amazon EC2"
+# ansible_product_name: "r5.8xlarge" # IaaS profile name
+# ansible_system_vendor: "Amazon EC2"
+#
+### Google Cloud Compute Engine Virtual Machine.
+# ansible_chassis_asset_tag: "NA" # SMBIOS Chassis Asset Tag
+# ansible_board_asset_tag: "9EAF3038-7EF5-3F1E-6620-FB3BDA7A3709" # SMBIOS Baseboard Asset Tag, ID of virtual machine on platform
+# ansible_chassis_vendor: "Google"
+# ansible_product_name: "Google Compute Engine"
+# ansible_system_vendor: "Google"
+#
+### IBM Cloud Virtual Server. Not applicable for IBM Cloud Classic Infrastructure.
+# ansible_chassis_asset_tag: "ibmcloud" # SMBIOS Chassis Asset Tag
+# ansible_board_asset_tag: "0c7d4459-xxxx-yyyy-zzzz-abcdefghijkl" # SMBIOS Baseboard Asset Tag, ID of virtual machine on platform
+# ansible_chassis_vendor: "IBM:Cloud Compute Server 1.0:mx2-16x128" # IaaS profile name
+# ansible_product_name: "Standard PC (i440FX + PIIX, 1996)"
+# ansible_system_vendor: "QEMU"
+#
+### Microsoft Azure Virtual Machine. Not applicable for MS Azure Classic/ASM.
+# ansible_chassis_asset_tag: "7783-xxxx-yyyy-zzzz-aaaa-bbbb-cc" # SMBIOS Chassis Asset Tag
+# ansible_board_asset_tag: "None" # SMBIOS Baseboard Asset Tag
+# ansible_chassis_vendor: "Virtual Machine"
+# ansible_product_name: "Microsoft Corporation"
+# ansible_system_vendor: "70f4a858-1eea-4c35-b9e1-e179c32fc6b5" # ID of virtual machine on platform
+#
+### VMware vSphere
+# ansible_product_name: "VMware7,1",
+# ansible_system_vendor: "VMware, Inc.",
+# ansible_virtualization_type: "VMware"
+#
+### End of comment
+
+
+# TODO: detection based on multiple facts and providing one standard
+# name for use as platform type in related include files
+# cloud_aliyun_ecs_vm, cloud_aws_ec2_vs, cloud_gcp_ce_vm, cloud_ibmcloud_powervs, cloud_ibmcloud_vs, cloud_msazure_vm,
+# hyp_ibmpower_lpar, hyp_redhat_ocp_virt_vm, hyp_redhat_rhel_kvm_vm, hyp_vmware_vsphere_vm
+
+- name: "SAP VM Preconfigure - Check if platform is Amazon Web Services EC2 Virtual Server"
+ when:
+ - ansible_system_vendor == 'Amazon EC2'
+ ansible.builtin.set_fact:
+ sap_vm_platform: cloud_aws_ec2_vs
+
+# - name: "SAP VM Preconfigure - Check if platform is Google Cloud Compute Engine Virtual Machine"
+# when:
+# - ansible_product_name == 'Google Compute Engine'
+# ansible.builtin.set_fact:
+# sap_vm_platform: cloud_gcp_ce_vm
+
+- name: "SAP VM Preconfigure - Check if platform is IBM Cloud Virtual Server"
+ when:
+ - ansible_chassis_asset_tag == 'ibmcloud'
+ ansible.builtin.set_fact:
+ sap_vm_platform: cloud_ibmcloud_vs
+
+# - name: "SAP VM Preconfigure - Check if platform is Microsoft Azure Virtual Machine"
+# when:
+# - ansible_chassis_vendor == 'Virtual Machine'
+# - ansible_product_name == 'Microsoft Corporation'
+# ansible.builtin.set_fact:
+# sap_vm_platform: cloud_msazure_vm
+
+# - name: "SAP VM Preconfigure - Check if platform is VMware vSphere"
+# when:
+# - ansible_virtualization_type == 'VMware'
+# ansible.builtin.set_fact:
+# sap_vm_platform: hyp_vmware_vsphere_vm
+
+
+- name: SAP VM Preconfigure - confirm AWS EC2 Virtual Server
+  when: sap_vm_platform == 'cloud_aws_ec2_vs'
+ block:
+
+ - name: (AWS) Get instance metadata token
+ ansible.builtin.uri:
+ headers:
+ X-aws-ec2-metadata-token-ttl-seconds: 21600
+ method: PUT
+ return_content: true
+ url: http://169.254.169.254/latest/api/token
+ register: detect_cloud_provider_aws_token
+ changed_when: false
+ ignore_errors: true
+
+ - name: (AWS) Get instance metadata ami-id
+ ansible.builtin.uri:
+ headers:
+ X-aws-ec2-metadata-token: "{{ detect_cloud_provider_aws_token.content }}"
+ method: GET
+ return_content: true
+ url: http://169.254.169.254/latest/meta-data/ami-id
+ register: detect_cloud_provider_aws_ami_id
+ changed_when: false
+ ignore_errors: true
+
+    - name: (AWS) Fail if cannot reach Instance Metadata Service
+      ansible.builtin.fail:
+        msg: Detected AWS EC2, but could not confirm with the Instance Metadata Service
+      when:
+        - detect_cloud_provider_aws_ami_id.failed
+
+
+- name: SAP VM Preconfigure - confirm Microsoft Azure Virtual Machine
+  when: sap_vm_platform == 'cloud_msazure_vm'
+ block:
+
+ - name: (Azure) Get instance metadata
+ ansible.builtin.uri:
+ headers:
+ Metadata: true
+ method: GET
+ url: http://169.254.169.254/metadata/instance/compute?api-version=2021-10-01
+ register: detect_cloud_provider_azure_instance_metadata
+ changed_when: false
+ ignore_errors: true
+
+    - name: (Azure) Fail if cannot reach Instance Metadata Service
+      ansible.builtin.fail:
+        msg: Detected MS Azure, but could not confirm with the Instance Metadata Service
+      when: detect_cloud_provider_azure_instance_metadata.json.azEnvironment is not defined
+        or detect_cloud_provider_azure_instance_metadata.json.azEnvironment != "AzurePublicCloud"
diff --git a/roles/sap_vm_preconfigure/tasks/main.yml b/roles/sap_vm_preconfigure/tasks/main.yml
new file mode 100644
index 0000000..c900e23
--- /dev/null
+++ b/roles/sap_vm_preconfigure/tasks/main.yml
@@ -0,0 +1,20 @@
+---
+
+- name: SAP virtual machine detect platform
+ ansible.builtin.include_tasks: "{{ role_path }}/tasks/detect_platform/main.yml"
+ when: sap_vm_platform_detect or sap_vm_platform_detect_only
+
+- name: SAP virtual machine preconfigure - Include Defaults Vars for {{ sap_vm_platform }}
+ ansible.builtin.include_vars: "{{ role_path }}/vars/platform_defaults_{{ sap_vm_platform }}.yml"
+ when: not sap_vm_platform_detect_only
+
+ # # Reduce swapping by amending vm.swappiness to 5% of free memory before swap is activated (default is 60%)
+ # - name: Adjust system swappiness
+ # ansible.posix.sysctl:
+ # name: vm.swappiness
+ # value: "5"
+ # state: present
+
+- name: SAP virtual machine preconfigure - Include Tasks for {{ sap_vm_platform }}
+ ansible.builtin.include_tasks: "{{ role_path }}/tasks/platform/{{ sap_vm_platform }}/main.yml"
+ when: not sap_vm_platform_detect_only
diff --git a/roles/sap_vm_preconfigure/tasks/platform/cloud_aliyun_ecs_vm/.gitkeep b/roles/sap_vm_preconfigure/tasks/platform/cloud_aliyun_ecs_vm/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_preconfigure/tasks/platform/cloud_aws_ec2_vs/.gitkeep b/roles/sap_vm_preconfigure/tasks/platform/cloud_aws_ec2_vs/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_preconfigure/tasks/platform/cloud_gcp_ce_vm/.gitkeep b/roles/sap_vm_preconfigure/tasks/platform/cloud_gcp_ce_vm/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_preconfigure/tasks/platform/cloud_ibmcloud_powervs/.gitkeep b/roles/sap_vm_preconfigure/tasks/platform/cloud_ibmcloud_powervs/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_preconfigure/tasks/platform/cloud_ibmcloud_vs/.gitkeep b/roles/sap_vm_preconfigure/tasks/platform/cloud_ibmcloud_vs/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_preconfigure/tasks/platform/cloud_msazure_vm/.gitkeep b/roles/sap_vm_preconfigure/tasks/platform/cloud_msazure_vm/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_preconfigure/tasks/platform/hyp_ibmpower_lpar/.gitkeep b/roles/sap_vm_preconfigure/tasks/platform/hyp_ibmpower_lpar/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_preconfigure/tasks/platform/hyp_redhat_ocp_virt_vm/.gitkeep b/roles/sap_vm_preconfigure/tasks/platform/hyp_redhat_ocp_virt_vm/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_preconfigure/tasks/platform/hyp_redhat_rhel_kvm_vm/assert-set-tuned-profile.yml b/roles/sap_vm_preconfigure/tasks/platform/hyp_redhat_rhel_kvm_vm/assert-set-tuned-profile.yml
new file mode 100644
index 0000000..720fa49
--- /dev/null
+++ b/roles/sap_vm_preconfigure/tasks/platform/hyp_redhat_rhel_kvm_vm/assert-set-tuned-profile.yml
@@ -0,0 +1,14 @@
+---
+- name: Assert - Check tuned profile
+ block:
+ - name: Get tuned profile
+ ansible.builtin.command: tuned-adm active
+ register: __sap_vm_preconfigure_register_tuned_profile_assert
+      changed_when: false
+
+ - name: Verify tuned profile
+ ansible.builtin.assert:
+        that: "'Current active profile: sap-hana-kvm-guest' in __sap_vm_preconfigure_register_tuned_profile_assert.stdout"
+ fail_msg: "FAIL: tuned profile is not sap-hana-kvm-guest"
+ success_msg: "PASS: tuned profile is sap-hana-kvm-guest"
+ ignore_errors: "{{ sap_vm_preconfigure_ignore_failed_assertion }}"
diff --git a/roles/sap_vm_preconfigure/tasks/platform/hyp_redhat_rhel_kvm_vm/main.yml b/roles/sap_vm_preconfigure/tasks/platform/hyp_redhat_rhel_kvm_vm/main.yml
new file mode 100644
index 0000000..26a234f
--- /dev/null
+++ b/roles/sap_vm_preconfigure/tasks/platform/hyp_redhat_rhel_kvm_vm/main.yml
@@ -0,0 +1,18 @@
+---
+# tasks file for sap_vm_preconfigure
+
+- name: Set filename prefix to empty string if role is run in normal mode
+ ansible.builtin.set_fact:
+ __sap_vm_preconfigure_fact_assert_prefix: ""
+ when: not sap_vm_preconfigure_assert|d(false)
+
+- name: Prepend filename with assert string if role is run in assert mode
+ ansible.builtin.set_fact:
+ __sap_vm_preconfigure_fact_assert_prefix: "assert-"
+  when: sap_vm_preconfigure_assert|d(false)
+
+- name: Include '{{ __sap_vm_preconfigure_fact_assert_prefix }}set-tuned-profile.yml'
+ ansible.builtin.include_tasks: '{{ __sap_vm_preconfigure_fact_assert_prefix }}set-tuned-profile.yml'
diff --git a/roles/sap_vm_preconfigure/tasks/platform/hyp_redhat_rhel_kvm_vm/set-tuned-profile.yml b/roles/sap_vm_preconfigure/tasks/platform/hyp_redhat_rhel_kvm_vm/set-tuned-profile.yml
new file mode 100644
index 0000000..080f207
--- /dev/null
+++ b/roles/sap_vm_preconfigure/tasks/platform/hyp_redhat_rhel_kvm_vm/set-tuned-profile.yml
@@ -0,0 +1,91 @@
+---
+- name: Create tuned profile directory /usr/lib/tuned/sap-hana
+ ansible.builtin.file:
+ path: /usr/lib/tuned/sap-hana
+ state: directory
+ mode: "0755"
+
+- name: Create sap-hana tuned profile
+ ansible.builtin.copy:
+ dest: "/usr/lib/tuned/sap-hana/tuned.conf"
+ mode: "0644"
+ content: |
+ #
+ # tuned configuration
+ #
+ [main]
+ summary=Optimize for SAP HANA
+
+ [cpu]
+ force_latency=cstate.id:3|70
+ governor=performance
+ energy_perf_bias=performance
+ min_perf_pct=100
+
+ [vm]
+ transparent_hugepages=never
+
+ [sysctl]
+ kernel.sem = 32000 1024000000 500 32000
+ kernel.numa_balancing = 0
+ kernel.sched_min_granularity_ns = 3000000
+ kernel.sched_wakeup_granularity_ns = 4000000
+ vm.dirty_ratio = 40
+ vm.dirty_background_ratio = 10
+ vm.swappiness = 10
+
+
+- name: Create tuned profile directory /usr/lib/tuned/sap-hana-kvm-guest
+ ansible.builtin.file:
+ path: /usr/lib/tuned/sap-hana-kvm-guest
+ mode: "0755"
+ state: directory
+
+- name: Add haltpoll.sh for tuned sap-hana-kvm-guest
+ ansible.builtin.copy:
+ dest: "/usr/lib/tuned/sap-hana-kvm-guest/haltpoll.sh"
+ mode: "0744"
+ content: |
+ #!/bin/bash
+
+ if [ "$1" == "start" ]; then
+ modprobe cpuidle-haltpoll force
+ fi
+
+ ## Question: Does this also need another "if" checking to see if $1 is "stop" to unload the module?
+
+
+- name: Create sap-hana-kvm-guest tuned profile
+ ansible.builtin.copy:
+ dest: "/usr/lib/tuned/sap-hana-kvm-guest/tuned.conf"
+ mode: "0644"
+ content: |
+ #
+ # tuned configuration
+ #
+ [main]
+ summary=Optimize for running SAP HANA on KVM inside a virtual guest
+ include=sap-hana
+
+ [haltpoll]
+ type=script
+ script=${i:PROFILE_DIR}/haltpoll.sh
+
+ [sysfs]
+ /sys/devices/system/clocksource/clocksource0/current_clocksource=tsc
+ /sys/module/haltpoll/parameters/guest_halt_poll_ns=2400000
+ /sys/module/haltpoll/parameters/guest_halt_poll_grow_start=2400000
+
+ [sysctl]
+ kernel.sched_latency_ns=12000000
+ kernel.sched_migration_cost_ns=500000
+ kernel.sched_min_granularity_ns=12000000
+ kernel.sched_wakeup_granularity_ns=15000000
+
+ [bootloader]
+ cmdline_saphana=skew_tick=1
+
+- name: Activate tuned profile
+  ansible.builtin.command: tuned-adm profile sap-hana-kvm-guest
+  register: __sap_provision_vm_register_tuned_sap_hana_kvm_guest_status
+  changed_when: true
diff --git a/roles/sap_vm_preconfigure/tasks/platform/hyp_vmware_vsphere_vm/.gitkeep b/roles/sap_vm_preconfigure/tasks/platform/hyp_vmware_vsphere_vm/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_preconfigure/vars/main.yml b/roles/sap_vm_preconfigure/vars/main.yml
new file mode 100644
index 0000000..ed97d53
--- /dev/null
+++ b/roles/sap_vm_preconfigure/vars/main.yml
@@ -0,0 +1 @@
+---
diff --git a/roles/sap_vm_preconfigure/vars/platform_defaults_redhat_rhel_kvm.yml b/roles/sap_vm_preconfigure/vars/platform_defaults_redhat_rhel_kvm.yml
new file mode 100644
index 0000000..88a3eaf
--- /dev/null
+++ b/roles/sap_vm_preconfigure/vars/platform_defaults_redhat_rhel_kvm.yml
@@ -0,0 +1,11 @@
+---
+
+# defaults file for sap_vm_preconfigure
+
+sap_vm_preconfigure_run_grub2_mkconfig: true
+
+# run role in assert mode?
+sap_vm_preconfigure_assert: false
+
+# ignore failed assertions instead of failing the role?
+sap_vm_preconfigure_ignore_failed_assertion: false
diff --git a/roles/sap_vm_provision/PLATFORM_GUIDANCE.md b/roles/sap_vm_provision/PLATFORM_GUIDANCE.md
new file mode 100644
index 0000000..376a1e4
--- /dev/null
+++ b/roles/sap_vm_provision/PLATFORM_GUIDANCE.md
@@ -0,0 +1,299 @@
+# Infrastructure Platform Guidance
+
+Table of Contents:
+- [Required resources when Ansible provisioning VMs](#required-resources-when-ansible-provisioning-vms)
+- [Recommended Infrastructure Platform authorizations](#recommended-infrastructure-platform-authorizations)
+- [Recommended Infrastructure Platform configuration](#recommended-infrastructure-platform-configuration)
+
+
+## Required resources when Ansible provisioning VMs
+
+The following does not apply if Ansible to Terraform is used.
+
+See below for the required environment resources on each Infrastructure Platform when Ansible is used to provision Virtual Machines.
+
+
+Amazon Web Services (AWS):
+
+- VPC
+ - VPC Access Control List (ACL)
+ - VPC Subnets
+ - VPC Security Groups
+- Route53 (Private DNS)
+- Internet Gateway (SNAT)
+- EFS (NFS)
+- Bastion host (AWS EC2 VS)
+- Key Pair for hosts
+
+
+
+
+Google Cloud (GCP):
+
+- VPC Network
+ - VPC Subnetwork
+- Compute Firewall
+- Compute Router
+ - SNAT
+- DNS Managed Zone (Private DNS)
+- Filestore (NFS)
+- Bastion host (GCP CE VM)
+
+
+
+
+Microsoft Azure:
+
+- Resource Group
+- VNet
+ - VNet Subnet
+ - VNet Network Security Group (NSG)
+- Private DNS Zone
+- NAT Gateway (SNAT)
+- Storage Account
+ - Azure Files (aka. File Storage Share, NFS)
+ - Private Endpoint Connection
+- Bastion host (MS Azure VM)
+- Key Pair for hosts
+
+
+
+
+IBM Cloud:
+
+- Resource Group
+- VPC
+ - VPC Access Control List (ACL)
+ - VPC Subnets
+ - VPC Security Groups
+- Private DNS
+- Public Gateway (SNAT)
+- File Share (NFS)
+- Bastion host (IBM Cloud VS)
+- Key Pair for hosts
+
+
+
+
+IBM Cloud, IBM Power VS:
+
+- Resource Group
+- IBM Power Workspace
+ - VLAN Subnet
+ - Cloud Connection (from secure enclave to IBM Cloud)
+- Private DNS Zone
+- Public Gateway (SNAT)
+- Bastion host (IBM Cloud VS or IBM Power VS)
+- Key Pair for hosts (in IBM Power Workspace)
+
+
+
+
+IBM PowerVC:
+
+- Host Group Shared Processor Pool
+- Storage Template
+- Network Configuration (for SEA or SR-IOV)
+- VM OS Image
+- Key Pair for hosts
+
+
+
+
+KubeVirt:
+
+- `TODO`
+
+
+
+
+OVirt:
+
+- `TODO`
+
+
+
+
+VMware vCenter:
+
+- Datacenter (SDDC)
+ - Cluster
+ - Hosts
+- NSX
+- Datastore
+- Content Library
+ - VM Template
+
+
+
+
+
+## Recommended Infrastructure Platform authorizations
+
+See below for the recommended authorizations for each Infrastructure Platform.
+
+
+
+Amazon Web Services (AWS):
+
+The AWS User and associated key/secret will need to be assigned by the Cloud Account Administrator. A recommended minimum of AWS IAM user authorization is achieved with the following AWS CLI commands:
+```shell
+# Login
+aws configure
+
+# Create AWS IAM Policy Group
+aws iam create-group --group-name 'ag-sap-automation'
+aws iam attach-group-policy --group-name 'ag-sap-automation' --policy-arn arn:aws:iam::aws:policy/AmazonVPCFullAccess
+aws iam attach-group-policy --group-name 'ag-sap-automation' --policy-arn arn:aws:iam::aws:policy/AmazonEC2FullAccess
+aws iam attach-group-policy --group-name 'ag-sap-automation' --policy-arn arn:aws:iam::aws:policy/AmazonRoute53FullAccess
+```
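+
+Note that the commands above create the policy group only; an existing IAM user still needs to be added to it. A minimal sketch, assuming an existing IAM user (the user name below is an illustrative placeholder):
+
+```shell
+# Add an existing AWS IAM user to the policy group
+aws iam add-user-to-group --group-name 'ag-sap-automation' --user-name 'example-sap-automation-user'
+```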
+
+
+
+
+Google Cloud (GCP):
+
+Google Cloud Platform places upper-limit quotas on different resources, and the limits `'CPUS_ALL_REGIONS'` and `'SSD_TOTAL_GB'` may be too low if using a new GCP Account or a new target GCP Region. Please check `gcloud compute regions describe us-central1 --format="table(quotas:format='table(metric,limit,usage)')"` before provisioning to a GCP Region, and manually request quota increases for these limits in the target GCP Region using the instructions at https://cloud.google.com/docs/quota#requesting_higher_quota (from the GCP Console or by contacting the GCP Support Team).
+
+The Google Cloud User credentials (Client ID and Client Secret) JSON file with associated authorizations will need to be assigned by the Cloud Account Administrator. Thereafter, please manually open and activate various APIs for the GCP Project to avoid HTTP 403 errors during provisioning:
+- Enable the Compute Engine API, using https://console.cloud.google.com/apis/api/compute.googleapis.com/overview
+- Enable the Cloud DNS API, using https://console.cloud.google.com/apis/api/dns.googleapis.com/overview
+- Enable the Network Connectivity API, using https://console.cloud.google.com/apis/library/networkconnectivity.googleapis.com
+- Enable the Cloud Filestore API, using https://console.cloud.google.com/apis/library/file.googleapis.com
+- Enable the Service Networking API (Private Services Connection to Filestore), using https://console.cloud.google.com/apis/library/servicenetworking.googleapis.com
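+
+As an alternative to the web console links above, the same APIs can be enabled with the gcloud CLI; a minimal sketch, assuming the GCP Project is already set as the default:
+
+```shell
+# Enable the required service APIs for the current GCP Project
+gcloud services enable compute.googleapis.com dns.googleapis.com \
+  networkconnectivity.googleapis.com file.googleapis.com servicenetworking.googleapis.com
+```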
+
+
+
+
+Microsoft Azure:
+
+The Azure Application Service Principal and associated Client ID and Client Secret will need to be assigned by the Cloud Account Administrator. A recommended minimum of Azure AD Role authorizations is achieved with the following MS Azure CLI commands:
+
+```shell
+# Login
+az login
+
+# Show Tenant and Subscription ID
+export AZ_SUBSCRIPTION_ID=$(az account show | jq .id --raw-output)
+export AZ_TENANT_ID=$(az account show | jq .tenantId --raw-output)
+
+# Create Azure Application, includes Client ID
+export AZ_CLIENT_ID=$(az ad app create --display-name ansible-terraform | jq .appId --raw-output)
+
+# Create Azure Service Principal, instantiation of Azure Application
+export AZ_SERVICE_PRINCIPAL_ID=$(az ad sp create --id $AZ_CLIENT_ID | jq .objectId --raw-output)
+
+# Assign default Azure AD Roles with privileges for creating Azure Virtual Machines
+# (each az role assignment create invocation accepts a single --role)
+az role assignment create --assignee "$AZ_SERVICE_PRINCIPAL_ID" \
+--subscription "$AZ_SUBSCRIPTION_ID" \
+--role "Virtual Machine Contributor"
+
+az role assignment create --assignee "$AZ_SERVICE_PRINCIPAL_ID" \
+--subscription "$AZ_SUBSCRIPTION_ID" \
+--role "Contributor"
+
+# Reset Azure Application, to provide the Client ID and Client Secret to use the Azure Service Principal
+az ad sp credential reset --name $AZ_CLIENT_ID
+```
+
+Note: MS Azure VMs provisioned will contain Hyper-V Hypervisor virtual interfaces using eth* on the OS, and when Accelerated Networking (AccelNet) is enabled for the MS Azure VM then the Mellanox SmartNIC/DPU SR-IOV Virtual Function (VF) may use enP* on the OS. For further information, see [MS Azure - How Accelerated Networking works](https://learn.microsoft.com/en-us/azure/virtual-network/accelerated-networking-how-it-works). During High Availability executions, failures may occur and may require additional variable 'sap_ha_pacemaker_cluster_vip_client_interface' to be defined.
+
+
+
+
+IBM Cloud:
+
+The IBM Cloud Account User (or Service ID) and associated API Key will need to be assigned by the Cloud Account Administrator. A recommended minimum of IBM Cloud IAM user authorization is achieved with the following IBM Cloud CLI commands:
+
+```shell
+# Login (see alternatives for user/password and SSO using ibmcloud login --help)
+ibmcloud login --apikey=
+
+# Create IBM Cloud IAM Access Group
+ibmcloud iam access-group-create 'ag-sap-automation'
+ibmcloud iam access-group-policy-create 'ag-sap-automation' --roles Editor --service-name=is
+ibmcloud iam access-group-policy-create 'ag-sap-automation' --roles Editor,Manager --service-name=transit
+ibmcloud iam access-group-policy-create 'ag-sap-automation' --roles Editor,Manager --service-name=dns-svcs
+
+# Access to create an IBM Cloud Resource Group (Ansible to Terraform)
+ibmcloud iam access-group-policy-create 'ag-sap-automation' --roles Administrator --resource-type=resource-group
+
+# Assign to a specified Account User or Service ID
+ibmcloud iam access-group-user-add 'ag-sap-automation' <<>>
+ibmcloud iam access-group-service-id-add 'ag-sap-automation' <<>>
+```
+
+Alternatively, use the IBM Cloud web console:
+- Open cloud.ibm.com - click Manage on navbar, click Access IAM, then on left nav menu click Access Groups
+- Create an Access Group, with the following policies:
+ - IAM Services > VPC Infrastructure Services > click All resources as scope + Platform Access as Editor
+ - IAM Services > DNS Services > click All resources as scope + Platform Access as Editor + Service access as Manager
+ - IAM Services > Transit Gateway > click All resources as scope + Platform Access as Editor + Service access as Manager
+ - `[OPTIONAL]` IAM Services > All Identity and Access enabled services > click All resources as scope + Platform Access as Viewer + Resource group access as Administrator
+ - `[OPTIONAL]` Account Management > Identity and Access Management > click Platform access as Editor
+ - `[OPTIONAL]` Account Management > IAM Access Groups Service > click All resources as scope + Platform Access as Editor
+
+
+
+
+IBM PowerVC:
+
+The recommended [IBM PowerVC Security Role](https://www.ibm.com/docs/en/powervc/latest?topic=security-managing-roles) is 'Administrator assistant' (admin_assist), because the 'Virtual machine manager' (vm_manager) role is not able to create IBM PowerVM Compute Template (required for setting OpenStack extra_specs specific to the IBM PowerVM hypervisor infrastructure platform, such as Processing Units). Note that the 'Administrator assistant' does not have the privilege to delete Virtual Machines.
+
+
+
+
+## Recommended Infrastructure Platform configuration
+
+See below for the recommended configurations for each Infrastructure Platform.
+
+
+VMware vCenter:
+
+The VM Template must be prepared with cloud-init. This process is sensitive to the VMware, cloud-init and Guest OS (RHEL / SLES) versions in use; success will vary. This requires:
+
+- Edit the default cloud-init configuration file, found at `/etc/cloud/cloud.cfg`. It must contain the data source for VMware (and not OVF), and force use of cloud-init metadata and userdata files. Note: appending key `network: {config: disabled}` may cause network `v1` to be incorrectly used instead of network [`v2`](https://cloudinit.readthedocs.io/en/latest/reference/network-config-format-v2.html) in the cloud-init metadata YAML to follow.
+ ```yaml
+ # Enable VMware VM Guest OS Customization with cloud-init (set to true for traditional customization)
+ disable_vmware_customization: false
+
+  # Allow raw data, to directly use the cloud-init metadata and user data files provided by the VMware VM Customization Specification
+  # Wait up to 60 seconds for the VMware VM Customization file to be available
+ datasource:
+ VMware:
+ allow_raw_data: true
+ vmware_cust_file_max_wait: 60
+ ```
+- Update `cloud-init` and `open-vm-tools` OS Package
+- Enable DHCP on the OS Network Interface (e.g. eth0, ens192 etc.)
+- Prior to VM shutdown and marking as a VMware VM Template, run commands:
+ - `vmware-toolbox-cmd config set deployPkg enable-custom-scripts true`
+ - `vmware-toolbox-cmd config set deployPkg wait-cloudinit-timeout 60`
+  - `sudo cloud-init clean --seed --logs` to remove the cloud-init logs and the cloud-init seed directory `/var/lib/cloud/seed`.
+    - If using cloud-init versions prior to 22.3.0, do not use the `--machine-id` parameter.
+    - Reportedly, the `--machine-id` parameter (which removes `/etc/machine-id`) may on first reboot cause the OS Network Interfaces to be `DOWN`, which causes the DHCP Request to silently error.
+- Once the VM is shut down, run 'Clone > Clone as Template to Library'
+- After provisioning the VM Template via Ansible, debug by checking:
+ - `/var/log/vmware-imc/toolsDeployPkg.log`
+ - `/var/log/cloud-init-output.log`
+ - `/var/log/cloud-init.log`
+ - `/var/lib/cloud/instance/user-data.txt`
+ - `/var/lib/cloud/instance/cloud-config.txt`
+ - `/var/run/cloud-init/instance-data.json`
+ - `/var/run/cloud-init/status.json`
+- See documentation for further information:
+ - [VMware KB 59557 - How to switch vSphere Guest OS Customization engine for Linux virtual machine](https://kb.vmware.com/s/article/59557)
+ - [VMware KB 90331 - How does vSphere Guest OS Customization work with cloud-init to customize a Linux VM](https://kb.vmware.com/s/article/90331)
+ - [VMware KB 91809 - VMware guest customization key cloud-init changes](https://kb.vmware.com/s/article/91809)
+ - [VMware KB 74880 - Setting the customization script for virtual machines in vSphere 7.x and 8.x](https://kb.vmware.com/s/article/74880)
+ - [vSphere Web Services SDK Programming Guide - Guest Customization Using cloud-init](https://developer.vmware.com/docs/18555/GUID-75E27FA9-2E40-4CBF-BF3D-22DCFC8F11F7.html)
+ - [cloud-init documentation - Reference - Datasources - VMware](https://cloudinit.readthedocs.io/en/latest/reference/datasources/vmware.html)
+
+
+In addition, the provisioned Virtual Machine must be accessible from the Ansible Controller (i.e. the device where the Ansible Playbook for SAP is executed must be able to reach the provisioned host).
+
+When using VMware vCenter and vSphere clusters with VMware NSX virtualized network overlays, using Segments (e.g. 192.168.0.0/16) connected to Tier-0/Tier-1 Gateways (which are bound to the backbone network subnet, e.g. 10.0.0.0/8), it is recommended to:
+- Use DHCP Server and attach to Subnet for the target VM. For example, create DHCP Server (e.g. NSX > Networking > Networking Profiles > DHCP Profile), set DHCP in the Gateway (e.g. NSX > Networking > Gateway > Edit > DHCP Config), then set for the Subnet (e.g. NSX > Networking > Segment > <> > Set DHCP Config) which the VMware VM Template is attached to; this allows subsequent cloned VMs to obtain an IPv4 Address
+- Use DNAT configuration for any VMware NSX Segments (e.g. NSX-T Policy NAT Rule)
+- For outbound internet connectivity, use SNAT configuration (e.g. rule added on NSX Gateway) set for the Subnet which the VMware VM Template is attached to. Alternatively, use a Web Forward Proxy.
+
+N.B. When using VMware vCenter and vSphere clusters with direct network subnet IP allocations to the VMXNET network adapter (no VMware NSX network overlays), the above actions may not be required.
+
+
diff --git a/roles/sap_vm_provision/README.md b/roles/sap_vm_provision/README.md
new file mode 100644
index 0000000..3d9871d
--- /dev/null
+++ b/roles/sap_vm_provision/README.md
@@ -0,0 +1,183 @@
+# sap_vm_provision Ansible Role
+
+Ansible Role to provision Virtual Machines to host SAP Software.
+
+This Ansible Role will provision Virtual Machines to different Infrastructure Platforms; with optional Ansible to Terraform to provision a minimal landing zone (partial compatibility via [Terraform Modules for SAP](https://github.com/sap-linuxlab/terraform.modules_for_sap)).
+
+Primarily, this Ansible Role was designed to be executed end-to-end (i.e. provision host/s, configure OS for SAP Software, install SAP Software, instantiate the SAP System), for example as part of the [Ansible Playbooks for SAP](https://github.com/sap-linuxlab/ansible.playbooks_for_sap).
+
+
+## Functionality
+
+The hosts provisioned by this Ansible Role provide a near-homogenous setup across different Infrastructure Platforms, while following the requirements and best practices defined by each vendor.
+
+A series of choices is provided by the Ansible Role:
+- Infrastructure-as-Code type (Ansible or Ansible to Terraform)
+- Infrastructure Platform
+- Host Specification Dictionary, containing 1..n Plans
+- Host OS Image Dictionary
+
+Dependent on the choices made by the end user, host/s will be provisioned to the target Infrastructure Platform.
+
+## Scope
+
+The code modularity and commonality of provisioning enable a wide gamut of SAP Software Solution Scenarios to be deployed to many Infrastructure Platforms with differing configurations.
+
+### Available Infrastructure Platforms
+
+- AWS EC2 Virtual Server instance/s
+- Google Cloud Compute Engine Virtual Machine/s
+- IBM Cloud, Intel Virtual Server/s
+- IBM Cloud, Power Virtual Server/s
+- Microsoft Azure Virtual Machine/s
+- IBM PowerVM Virtual Machine/s _(formerly LPAR/s)_
+- OVirt Virtual Machine/s (e.g. Red Hat Enterprise Linux KVM)
+- KubeVirt Virtual Machine/s (e.g. Red Hat OpenShift Virtualization, SUSE Rancher with Harvester HCI) `[Experimental]`
+- VMware vSphere Virtual Machine/s `[Beta]`
+
+### Known issues
+
+- The VMware REST API combined with cloud-init is unstable; the `userdata` configuration may not execute and provisioning will fail
+
+
+## Requirements
+
+### Target Infrastructure Platform
+
+For a list of requirements and recommended authorizations on each Infrastructure Platform, please see the separate [Infrastructure Platform Guidance](./PLATFORM_GUIDANCE.md) document and the drop-down for each different Infrastructure Platform.
+
+### Target hosts
+
+**OS Versions:**
+- Red Hat Enterprise Linux 8.0+
+- SUSE Linux Enterprise Server 15 SP0+
+
+### Execution/Controller host
+
+**Dependencies:**
+- OS Packages
+ - Python 3.9.7+ (i.e. CPython distribution)
+ - AWS CLI _(when High Availability on AWS)_
+ - GCloud CLI _(when High Availability on GCP)_
+ - IBM Cloud CLI _(when High Availability on IBM Cloud)_
+ - Terraform 1.0.0-1.5.5 _(when Ansible to Terraform, or legacy Ansible Collection for IBM Cloud)_
+- Python Packages
+ - `requests` 2.0+
+ - `passlib` 1.7+
+ - `jmespath` 1.0.1+
+ - `boto3` for Amazon Web Services
+ - `google-auth` for Google Cloud
+ - `https://raw.githubusercontent.com/ansible-collections/azure/dev/requirements-azure.txt` for Microsoft Azure
+ - `openstacksdk` for IBM PowerVM
+ - `ovirt-engine-sdk-python` for OVirt
+ - `aiohttp` for VMware
+- Ansible
+ - Ansible Core 2.12.0+
+ - Ansible Collections:
+ - `amazon.aws`
+ - `azure.azcollection`
+ - `cloud.common`
+ - `cloud.terraform`
+ - `community.aws`
+ - `google.cloud`
+ - `ibm.cloudcollection`
+ - _(legacy, to be replaced with `ibm.cloud` in future)_
+ - `kubevirt.core`
+ - `openstack.cloud`
+ - `ovirt.ovirt`
+ - `vmware.vmware_rest` _(requires `cloud.common`)_
+
+
+## Execution
+
+### Sample execution
+
+For further information, see the [sample Ansible Playbooks in `/playbooks`](../playbooks/).
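+
+A typical invocation passes the variables for the chosen Infrastructure Platform via an extra-vars file; a minimal sketch (the playbook and variables file names below are illustrative placeholders):
+
+```shell
+# Execute a playbook that includes the sap_vm_provision Ansible Role,
+# with platform credentials and selections defined in an extra-vars file
+ansible-playbook ./playbooks/sample-sap-playbook.yml --extra-vars "@./sap_vars.yml"
+```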
+
+### Suggested execution sequence
+
+No other Ansible Roles are suggested to be executed prior to this Ansible Role.
+
+### Summary of execution flow
+
+- Define target Host/s Specifications with a 'plan' name (e.g. `test1_256gb_memory` containing 1 host of 256GB Memory for SAP HANA and 1 host for SAP NetWeaver); append to the Host Specification Dictionary
+- Define target Host OS Image Dictionary, or use defaults provided for each Cloud Hyperscaler.
+- Execute with chosen:
+ - Infrastructure-as-Code method (Ansible or Ansible to Terraform) using variable `sap_vm_provision_iac_type`
+ - Infrastructure Platform target using variable `sap_vm_provision_iac_platform`
+ - Selected plan using variable `sap_vm_provision_host_specification_plan` referring to the definition in the Host Specification Dictionary
+ - Variables specific to each Infrastructure Platform (e.g. `sap_vm_provision_aws_access_key`)
+ - Include files from subdirectory based upon chosen method and target (e.g. `/tasks/platform_ansible_to_terraform/aws_ec2_vs/`)
+- Provision host/s
+- Add hosts to Ansible Inventory Groups defined by the Host Specification Dictionary _(e.g. hana_primary, hana_secondary, nwas_ascs, nwas_ers, nwas_pas, nwas_aas, anydb_primary, anydb_secondary)_
+- Perform additional tasks for host/s (e.g. DNS Records, /etc/hosts, register OS for Packages, register Web Forward Proxy)
+- Set variables if other Ansible Roles are to be executed (e.g. variables for Ansible Roles in the `sap_install` Ansible Collection)
+- Perform any tasks for High Availability (execution dependent on hosts in Ansible Inventory Groups)
+- **POST:** Re-execute Ansible Role with variable `sap_vm_provision_iac_post_deployment: true` to update High Availability configurations using Load Balancer (i.e. LB Health Check Port moved to Linux Pacemaker listener)
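+
+For example, the "Execute with chosen" step above might set variables such as the following; a minimal sketch for AWS (all values are illustrative placeholders):
+
+```yaml
+# Selection of Infrastructure-as-Code method and Infrastructure Platform
+sap_vm_provision_iac_type: ansible
+sap_vm_provision_iac_platform: aws_ec2_vs
+
+# Selection of a plan from the Host Specification Dictionary
+sap_vm_provision_host_specification_plan: example_host_specification_plan
+
+# Variables specific to the chosen Infrastructure Platform
+sap_vm_provision_aws_access_key: "AKIA..."
+sap_vm_provision_aws_secret_access_key: "..."
+sap_vm_provision_aws_vpc_availability_zone: us-east-1a
+```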
+
+
+### Required structure in Ansible Playbook
+
+_**CRITICAL NOTE**_
+
+To provide parallelisation of provisioning, the following structure must be used to dynamically create an Ansible Inventory Group for the requested hostnames. Without this necessary pre-task, the Ansible Role will not function.
+
+> Design decision note: This required structure avoids the Ansible Role using a sequential loop, where each host will execute all Ansible Tasks before the next host is provisioned; or using an async loop which hides all Ansible Task output from the end user.
+
+This required structure will:
+
+- In the first Ansible Play using `localhost`, dynamically create an Ansible Inventory with the hostnames listed parsed from the Ansible Dictionary (variable named `sap_vm_provision_XYZ_host_specifications_dictionary` dependent on the Infrastructure Platform)
+- In the second Ansible Play, use the dynamic Ansible Inventory `sap_vm_provision_target_inventory_group` to create an Ansible Play Batch containing each target host in the dynamic Ansible Inventory, which will then execute all subsequent Ansible Tasks in parallel for each target host.
+
+**Structure to execute sap_vm_provision:**
+
+```yaml
+- name: Ansible Play to create dynamic inventory group for provisioning
+ hosts: localhost
+ gather_facts: false
+ tasks:
+
+ - name: Create dynamic inventory group for Ansible Role sap_vm_provision
+ ansible.builtin.add_host:
+ name: "{{ item }}"
+ group: sap_vm_provision_target_inventory_group
+ # Adjust var name in loop (i.e. replace _XYZ_ to the correct Ansible Dictionary)
+ loop: "{{ sap_vm_provision_XYZ_host_specifications_dictionary[sap_vm_provision_host_specification_plan].keys() }}"
+
+- name: Ansible Play to provision hosts for SAP
+ hosts: sap_vm_provision_target_inventory_group # Ansible Play target hosts pattern, use dynamic Inventory Group
+ gather_facts: false
+ tasks:
+
+ - name: Execute Ansible Role sap_vm_provision
+ ansible.builtin.include_role:
+ name: community.sap_infrastructure.sap_vm_provision
+
+- name: Ansible Play to verify provisioned hosts for SAP
+ hosts: all
+ tasks:
+
+ - name: Verify hosts provisioned by sap_vm_provision and assigned Inventory Groups
+ ansible.builtin.debug:
+ var: groups
+```
+
+### Tags to control execution
+
+There are no tags used to control the execution of this Ansible Role.
+
+
+## License
+
+Apache 2.0
+
+
+## Authors
+
+Sean Freeman
+
+---
+
+## Ansible Role Input Variables
+
+Please first check the [/defaults parameters file](./defaults/main.yml).
diff --git a/roles/sap_vm_provision/defaults/main.yml b/roles/sap_vm_provision/defaults/main.yml
new file mode 100644
index 0000000..b5b633c
--- /dev/null
+++ b/roles/sap_vm_provision/defaults/main.yml
@@ -0,0 +1,572 @@
+---
+
+####
+# VM Provision selection
+####
+
+# ansible , ansible_to_terraform
+sap_vm_provision_iac_type: ""
+
+# aws_ec2_vs , gcp_ce_vm , ibmcloud_vs , ibmcloud_powervs , msazure_vm , ibmpowervm_vm , kubevirt_vm , ovirt_vm , vmware_vm
+sap_vm_provision_iac_platform: ""
+
+
+####
+# VM Provision Infrastructure-as-Code (IaC) Configuration - Ansible provisioning - Cloud Hyperscaler
+# Only for use when 'ansible' is value provided for variable sap_vm_provision_iac_type
+####
+
+sap_vm_provision_bastion_public_ip: ""
+
+sap_vm_provision_bastion_ssh_port: 50222
+sap_vm_provision_bastion_user: ""
+
+sap_vm_provision_ssh_bastion_private_key_file_path: ""
+sap_vm_provision_ssh_host_private_key_file_path: ""
+sap_vm_provision_ssh_host_public_key_file_path: "{{ sap_vm_provision_ssh_host_private_key_file_path + '.pub' }}"
+
+
+####
+# VM Provision Infrastructure-as-Code (IaC) Configuration - Ansible to Terraform provisioning - Cloud Hyperscaler
+# Only for use when 'ansible_to_terraform' is value provided for variable sap_vm_provision_iac_type
+####
+
+# sap_vm_provision_bastion_ssh_port: 50222
+# sap_vm_provision_bastion_user: ""
+
+# sap_vm_provision_bastion_os_image: ""
+
+# sap_vm_provision_resource_prefix: ""
+
+# sap_vm_provision_terraform_state: "" # present, absent
+# sap_vm_provision_terraform_work_dir_path: "/tmp/tf1"
+
+
+####
+# VM Provision - Generic configuration
+####
+
+# Refers to a given plan in sap_vm_provision_<>_host_specifications_dictionary
+# See defaults for example
+# e.g. example_host_specification_plan
+sap_vm_provision_host_specification_plan: ""
+
+# Desired FQDN for SAP Hosts; for Cloud Hyperscaler the domain must exist from a Private DNS service
+# e.g. poc.internal
+sap_vm_provision_dns_root_domain: ""
+
+
+####
+# VM Provision - Generic configuration - Hypervisor
+####
+
+# Register to OS Vendor online subscription to package repositories
+sap_vm_provision_os_online_registration_passcode: ""
+sap_vm_provision_os_online_registration_user: ""
+
+# Register to Hosted Mirror of OS Vendor package repositories
+# sap_vm_provision_os_registration_ca_file_path: ""
+# sap_vm_provision_os_registration_script_command: ""
+
+# Proxy - Web Forward, when SNAT not available for Hypervisor VMs
+# sap_vm_provision_proxy_web_forward_proxy_ip: "" # IP:Port only, no http:// prefix
+# sap_vm_provision_proxy_web_forward_exclusions: "localhost,127.0.0.1,{{ sap_vm_provision_dns_root_domain }}"
+
+
+####
+# VM Provision - Generic configuration - NFS for multiple hosts
+####
+
+sap_vm_provision_nfs_mount_point: "" # e.g. NFS_IP:/NFS_MOUNT_PATH
+sap_vm_provision_nfs_mount_point_separate_sap_transport_dir: ""
+
+sap_vm_provision_nfs_mount_point_type: "" # e.g. nfs, nfs4
+sap_vm_provision_nfs_mount_point_opts: ""
+
+
+####
+# VM Provision - dynamic inventory variables
+# Cannot be given a value by end user, the Ansible Role populates the variable
+# After sap_vm_provision has executed successfully, these variables can easily be used to populate other Ansible Role variables (e.g. sap_swpm_db_host)
+####
+
+# sap_vm_provision_dynamic_inventory_anydb_primary_hostname: "{{ None }}"
+# sap_vm_provision_dynamic_inventory_anydb_secondary_hostname: "{{ None }}"
+# sap_vm_provision_dynamic_inventory_hana_primary_hostname: "{{ None }}"
+# sap_vm_provision_dynamic_inventory_hana_secondary_hostname: "{{ None }}"
+# sap_vm_provision_dynamic_inventory_nw_ascs_hostname: "{{ None }}"
+# sap_vm_provision_dynamic_inventory_nw_ers_hostname: "{{ None }}"
+# sap_vm_provision_dynamic_inventory_nw_pas_hostname: "{{ None }}"
+# sap_vm_provision_dynamic_inventory_nw_aas_hostname: "{{ None }}"
+
+# sap_vm_provision_dynamic_inventory_anydb_primary_ip: "{{ None }}"
+# sap_vm_provision_dynamic_inventory_anydb_secondary_ip: "{{ None }}"
+# sap_vm_provision_dynamic_inventory_hana_primary_ip: "{{ None }}"
+# sap_vm_provision_dynamic_inventory_hana_secondary_ip: "{{ None }}"
+# sap_vm_provision_dynamic_inventory_nw_ascs_ip: "{{ None }}"
+# sap_vm_provision_dynamic_inventory_nw_ers_ip: "{{ None }}"
+# sap_vm_provision_dynamic_inventory_nw_pas_ip: "{{ None }}"
+# sap_vm_provision_dynamic_inventory_nw_aas_ip: "{{ None }}"
+
+
+####
+# Infrastructure Platform - Cloud Hyperscaler - Credentials and Configuration
+####
+
+# AWS
+sap_vm_provision_aws_access_key: ""
+sap_vm_provision_aws_secret_access_key: ""
+sap_vm_provision_aws_region: "{{ sap_vm_provision_aws_vpc_availability_zone[:-1] }}"
+sap_vm_provision_aws_vpc_availability_zone: ""
+sap_vm_provision_aws_vpc_subnet_id: "" # if ansible_to_terraform, use "new"
+sap_vm_provision_aws_vpc_subnet_create_boolean: "{{ true if sap_vm_provision_aws_vpc_subnet_id == 'new' else false }}"
+sap_vm_provision_aws_vpc_sg_names: "" # comma-separated, if ansible_to_terraform then ignore this variable
+sap_vm_provision_aws_key_pair_name_ssh_host_public_key: ""
+
+# Google Cloud
+sap_vm_provision_gcp_credentials_json: ""
+sap_vm_provision_gcp_project: ""
+sap_vm_provision_gcp_region: "{{ sap_vm_provision_gcp_region_zone[:-2] }}"
+sap_vm_provision_gcp_region_zone: ""
+sap_vm_provision_gcp_vpc_name: ""
+sap_vm_provision_gcp_vpc_subnet_name: ""
+
+# IBM Cloud
+sap_vm_provision_ibmcloud_api_key: ""
+sap_vm_provision_ibmcloud_resource_group_name: ""
+sap_vm_provision_ibmcloud_region: "{{ sap_vm_provision_ibmcloud_availability_zone | regex_replace('-[0-9]', '') }}"
+sap_vm_provision_ibmcloud_availability_zone: ""
+sap_vm_provision_ibmcloud_private_dns_instance_name: ""
+sap_vm_provision_ibmcloud_vpc_name: ""
+sap_vm_provision_ibmcloud_vpc_subnet_name: ""
+sap_vm_provision_ibmcloud_vpc_sg_names: "" # comma-separated, if ansible_to_terraform then ignore this variable
+sap_vm_provision_ibmcloud_key_pair_name_ssh_host_public_key: ""
+
+# IBM Cloud, addendum for IBM Power VS
+sap_vm_provision_ibmcloud_powervs_location: ""
+sap_vm_provision_ibmcloud_powervs_workspace_name: ""
+sap_vm_provision_ibmcloud_powervs_vlan_subnet_name: ""
+sap_vm_provision_ibmcloud_powervs_key_pair_name_ssh_host_public_key: ""
+
+# MS Azure
+sap_vm_provision_msazure_subscription_id: ""
+sap_vm_provision_msazure_tenant_id: ""
+sap_vm_provision_msazure_app_client_id: ""
+sap_vm_provision_msazure_app_client_secret: ""
+sap_vm_provision_msazure_resource_group_name: ""
+sap_vm_provision_msazure_location_region: ""
+sap_vm_provision_msazure_location_availability_zone_no: 1
+sap_vm_provision_msazure_vnet_name: ""
+sap_vm_provision_msazure_vnet_subnet_name: ""
+
+
+####
+# Infrastructure Platform - Hypervisor - Credentials and Configuration
+####
+
+# IBM PowerVM
+sap_vm_provision_ibmpowervm_vc_auth_endpoint: "" # e.g. https://POWERVC_HOST:5000/v3/
+sap_vm_provision_ibmpowervm_vc_user: ""
+sap_vm_provision_ibmpowervm_vc_user_password: ""
+sap_vm_provision_ibmpowervm_vc_project_name: ""
+sap_vm_provision_ibmpowervm_host_group_name: ""
+sap_vm_provision_ibmpowervm_host_group_shared_procesor_pool_name: ""
+sap_vm_provision_ibmpowervm_network_name: ""
+sap_vm_provision_ibmpowervm_network_vnic_type: "normal" # 'direct' == SR-IOV, 'normal' == Shared Ethernet Adapter (SEA)
+sap_vm_provision_ibmpowervm_storage_template_name: "" # aka. Openstack Cinder Volume Type
+sap_vm_provision_ibmpowervm_key_pair_name_ssh_host_public_key: ""
+
+# Kubevirt
+sap_vm_provision_kubevirt_api_key: ""
+sap_vm_provision_kubevirt_cluster_url: ""
+sap_vm_provision_kubevirt_vm_host_os_image_url: "" # e.g. docker://registry.redhat.io/rhel8/rhel-guest-image:8.6.0
+sap_vm_provision_kubevirt_os_user: ""
+sap_vm_provision_kubevirt_os_user_password: ""
+sap_vm_provision_kubevirt_target_namespace: ""
+
+# OVirt
+sap_vm_provision_ovirt_engine_cafile: ""
+sap_vm_provision_ovirt_engine_fqdn: ""
+sap_vm_provision_ovirt_engine_insecure_bool: true
+sap_vm_provision_ovirt_engine_password: ""
+sap_vm_provision_ovirt_engine_url: ""
+sap_vm_provision_ovirt_engine_user: ""
+sap_vm_provision_ovirt_hypervisor_cluster_host_node_name: ""
+sap_vm_provision_ovirt_hypervisor_cluster_name: ""
+sap_vm_provision_ovirt_hypervisor_cluster_storage_domain_name: ""
+
+# VMware
+sap_vm_provision_vmware_vcenter_hostname: ""
+sap_vm_provision_vmware_vcenter_validate_certs_bool: false
+sap_vm_provision_vmware_vcenter_user: ""
+sap_vm_provision_vmware_vcenter_password: ""
+sap_vm_provision_vmware_vm_folder_name: ""
+sap_vm_provision_vmware_vm_cluster_name: ""
+sap_vm_provision_vmware_vm_cluster_host_name: ""
+sap_vm_provision_vmware_vm_cluster_datastore_name: ""
+
+
+####
+# OS Images
+####
+
+### Select an OS Image in sap_vm_provision_<>_host_os_image_dictionary
+# e.g. sles-15-3-sap
+
+# AWS EC2 Virtual Server
+sap_vm_provision_aws_ec2_vs_host_os_image: ""
+
+# Google Cloud Compute Engine Virtual Machine
+sap_vm_provision_gcp_ce_vm_host_os_image: ""
+
+# IBM Cloud Virtual Server
+sap_vm_provision_ibmcloud_vs_host_os_image: ""
+
+# IBM Cloud, IBM Power VS
+sap_vm_provision_ibmcloud_powervs_host_os_image: ""
+
+# MS Azure Virtual Machine
+sap_vm_provision_msazure_vm_host_os_image: ""
+
+# IBM PowerVM Virtual Machine (see IBM PowerVC Web GUI for list)
+sap_vm_provision_ibmpowervm_vm_host_os_image: ""
+
+
+# OS Images - AWS AMI
+sap_vm_provision_aws_ec2_vs_host_os_image_dictionary:
+ rhel-8-1: "*RHEL-8.1*_HVM*x86_64*"
+ rhel-8-2: "*RHEL-8.2*_HVM*x86_64*"
+ rhel-8-4: "*RHEL-8.4*_HVM*x86_64*"
+ rhel-8-6: "*RHEL-8.6*_HVM*x86_64*"
+ rhel-7-7-sap-ha: "*RHEL-SAP-7.7*"
+ rhel-7-9-sap-ha: "*RHEL-SAP-7.9*"
+ rhel-8-1-sap-ha: "*RHEL-SAP-8.1.0*"
+ rhel-8-2-sap-ha: "*RHEL-SAP-8.2.0*"
+ rhel-8-4-sap-ha: "*RHEL-SAP-8.4.0*"
+ rhel-8-6-sap-ha: "*RHEL-SAP-8.6.0*"
+ sles-15-2: "*suse-sles-15-sp2-v202*-hvm-ssd-x86_64*"
+ sles-15-3: "*suse-sles-15-sp3-v202*-hvm-ssd-x86_64*"
+ sles-15-4: "*suse-sles-15-sp4-v202*-hvm-ssd-x86_64*"
+ sles-12-5-sap: "*suse-sles-sap-12-sp5-v202*-hvm-ssd-x86_64*"
+ sles-15-1-sap: "*suse-sles-sap-15-sp1-v202*-hvm-ssd-x86_64*"
+ sles-15-2-sap: "*suse-sles-sap-15-sp2-v202*-hvm-ssd-x86_64*"
+ sles-15-3-sap: "*suse-sles-sap-15-sp3-v202*-hvm-ssd-x86_64*"
+ sles-15-4-sap: "*suse-sles-sap-15-sp4-v202*-hvm-ssd-x86_64*"
+
+# OS Images - Google Cloud
+sap_vm_provision_gcp_ce_vm_host_os_image_dictionary:
+ rhel-8-latest:
+ project: "rhel-cloud"
+ family: "rhel-8"
+ rhel-7-7-sap-ha:
+ project: "rhel-sap-cloud"
+ family: "rhel-7-7-sap-ha"
+ rhel-7-9-sap-ha:
+ project: "rhel-sap-cloud"
+ family: "rhel-7-9-sap-ha"
+ rhel-8-1-sap-ha:
+ project: "rhel-sap-cloud"
+ family: "rhel-8-1-sap-ha"
+ rhel-8-2-sap-ha:
+ project: "rhel-sap-cloud"
+ family: "rhel-8-2-sap-ha"
+ rhel-8-4-sap-ha:
+ project: "rhel-sap-cloud"
+ family: "rhel-8-4-sap-ha"
+ rhel-8-6-sap-ha:
+ project: "rhel-sap-cloud"
+ family: "rhel-8-6-sap-ha"
+ sles-15-latest:
+ project: "suse-cloud"
+ family: "sles-15"
+ sles-15-sp3-sap:
+ project: "suse-sap-cloud"
+ family: "sles-15-sp3-sap"
+ sles-15-sp4-sap:
+ project: "suse-sap-cloud"
+ family: "sles-15-sp4-sap"
+ sles-15-sp5-sap:
+ project: "suse-sap-cloud"
+ family: "sles-15-sp5-sap"
+
+# OS Images - IBM Cloud
+sap_vm_provision_ibmcloud_vs_host_os_image_dictionary:
+ rhel-8-4: ".*redhat.*8-4.*minimal.*amd64.*"
+ rhel-8-6: ".*redhat.*8-6.*minimal.*amd64.*"
+ rhel-9-0: ".*redhat.*9-0.*minimal.*amd64.*"
+ rhel-7-9-sap-ha: ".*redhat.*7-9.*amd64.*hana.*"
+ rhel-8-4-sap-ha: ".*redhat.*8-4.*amd64.*hana.*"
+ rhel-8-6-sap-ha: ".*redhat.*8-6.*amd64.*hana.*"
+ sles-15-3-sap-ha: ".*sles.*15-3.*amd64.*hana.*"
+ sles-15-4-sap: ".*sles.*15-4.*amd64.*hana.*"
+ sles-15-5-sap: ".*sles.*15-5.*amd64.*hana.*"
+
+# OS Images - IBM Cloud, IBM Power VS 'Full Linux subscription' with support and activation keys
+sap_vm_provision_ibmcloud_powervs_host_os_image_dictionary:
+ rhel-8-4: ".*RHEL.*8.*4"
+ rhel-8-6: ".*RHEL.*8.*6"
+ rhel-9-2: ".*RHEL.*9.*2"
+ sles-15-3: ".*SLES.*15.*3"
+ sles-15-4: ".*SLES.*15.*4"
+ rhel-8-4-sap-ha: ".*RHEL.*8.*4.*SAP$" # ensure string suffix using $
+ rhel-8-6-sap-ha: ".*RHEL.*8.*6.*SAP$" # ensure string suffix using $
+ sles-15-2-sap: ".*SLES.*15.*2.*SAP$" # ensure string suffix using $
+ sles-15-3-sap: ".*SLES.*15.*3.*SAP$" # ensure string suffix using $
+ sles-15-4-sap: ".*SLES.*15.*4.*SAP$" # ensure string suffix using $
+
+# OS Images - MS Azure
+sap_vm_provision_msazure_vm_host_os_image_dictionary:
+ rhel-8-4:
+ publisher: "RedHat"
+ offer: "RHEL"
+ sku: "84-gen2"
+ rhel-8-1-sap-ha:
+ publisher: "RedHat"
+ offer: "RHEL-SAP-HA"
+ sku: "81sapha-gen2"
+ rhel-8-2-sap-ha:
+ publisher: "RedHat"
+ offer: "RHEL-SAP-HA"
+ sku: "82sapha-gen2"
+ rhel-8-4-sap-ha:
+ publisher: "RedHat"
+ offer: "RHEL-SAP-HA"
+ sku: "84sapha-gen2"
+ rhel-8-1-sap-applications:
+ publisher: "RedHat"
+ offer: "RHEL-SAP-HA"
+ sku: "81sapapps-gen2"
+ rhel-8-2-sap-applications:
+ publisher: "RedHat"
+ offer: "RHEL-SAP-HA"
+ sku: "82sapapps-gen2"
+ rhel-8-4-sap-applications:
+ publisher: "RedHat"
+ offer: "RHEL-SAP-HA"
+ sku: "84sapapps-gen2"
+ sles-15-sp3-sap:
+ publisher: "SUSE"
+ offer: "sles-sap-15-sp3"
+ sku: "gen2"
+ sles-15-sp4-sap:
+ publisher: "SUSE"
+ offer: "sles-sap-15-sp4"
+ sku: "gen2"
+ sles-15-sp5-sap:
+ publisher: "SUSE"
+ offer: "sles-sap-15-sp5"
+ sku: "gen2"
+
+
+####
+# Host Specification Plan
+####
+
+# AWS
+sap_vm_provision_aws_ec2_vs_host_specifications_dictionary:
+ example_host_specification_plan:
+ host1: # Hostname, must be 13 characters or less
+ virtual_machine_profile: r5.8xlarge
+ disable_ip_anti_spoofing: false
+ #sap_system_type: project_dev # project_dev, project_tst, project_prd
+ sap_host_type: "" # hana_primary, hana_secondary, anydb_primary, anydb_secondary, nwas_ascs, nwas_ers, nwas_pas, nwas_aas
+ storage_definition:
+ - name: data_0
+ mountpoint: /data0
+ disk_count: 1 # default: 1
+ disk_size: 512 # size in GB, integer
+ disk_type: gp3 # default: gp3, for AWS EBS disk type
+ #disk_iops: # default: null
+
+
+# Google Cloud
+sap_vm_provision_gcp_ce_vm_host_specifications_dictionary:
+ example_host_specification_plan:
+ host1: # Hostname, must be 13 characters or less
+ virtual_machine_profile: n2-highmem-32
+ disable_ip_anti_spoofing: true
+ #sap_system_type: project_dev # project_dev, project_tst, project_prd
+ sap_host_type: hana_primary # hana_primary, hana_secondary, anydb_primary, anydb_secondary, nwas_ascs, nwas_ers, nwas_pas, nwas_aas
+ storage_definition:
+ - name: data_0
+ mountpoint: /data0
+          disk_count: 1 # default: 1, if more than 1 then the LVM logical volume will be striped across the defined disks
+          disk_size: 512 # size in GB, integer
+          disk_type: pd-ssd # GCP Persistent Disk storage type
+ #disk_iops: # default: null
+
+
+# IBM Cloud
+sap_vm_provision_ibmcloud_vs_host_specifications_dictionary:
+ example_host_specification_plan:
+ host1: # Hostname, must be 13 characters or less
+ virtual_machine_profile: mx2-32x256
+ disable_ip_anti_spoofing: true
+ #sap_system_type: project_dev # project_dev, project_tst, project_prd
+ sap_host_type: hana_primary # hana_primary, hana_secondary, anydb_primary, anydb_secondary, nwas_ascs, nwas_ers, nwas_pas, nwas_aas
+ storage_definition:
+ - name: data_0
+ mountpoint: /data0
+          disk_count: 1 # default: 1, if more than 1 then the LVM logical volume will be striped across the defined disks
+          disk_size: 512 # size in GB, integer
+          disk_type: 10iops-tier # IBM Cloud Block Storage profile
+ #disk_iops: # default: null
+
+
+# IBM Cloud, IBM Power VS
+sap_vm_provision_ibmcloud_powervs_host_specifications_dictionary:
+ example_host_specification_plan:
+ host1: # Hostname, must be 13 characters or less
+ virtual_machine_profile: ush1-4x256 # alt: use custom SAP instance profile sizes using cnp-2x16 (2 Power Cores [16 vCPU] x 16GB) and above
+ disable_ip_anti_spoofing: true
+ #sap_system_type: project_dev # project_dev, project_tst, project_prd
+ sap_host_type: hana_primary # hana_primary, hana_secondary, anydb_primary, anydb_secondary, nwas_ascs, nwas_ers, nwas_pas, nwas_aas
+ storage_definition:
+ - name: data_0
+ mountpoint: /data0
+          disk_count: 1 # default: 1, if more than 1 then the LVM logical volume will be striped across the defined disks
+ disk_size: 512 # size in GB, integer
+ disk_type: tier1
+
+
+# MS Azure
+sap_vm_provision_msazure_vm_host_specifications_dictionary:
+ example_host_specification_plan:
+ host1: # Hostname, must be 13 characters or less
+ virtual_machine_profile: Standard_M32ls
+ disable_ip_anti_spoofing: true
+ #sap_system_type: project_dev # project_dev, project_tst, project_prd
+ sap_host_type: hana_primary # hana_primary, hana_secondary, anydb_primary, anydb_secondary, nwas_ascs, nwas_ers, nwas_pas, nwas_aas
+ storage_definition:
+ - name: data_0
+ mountpoint: /data0
+          disk_count: 1 # default: 1, if more than 1 then the LVM logical volume will be striped across the defined disks
+          disk_size: 512 # size in GB, integer
+          disk_type: P20 # MS Azure Managed Disk storage type
+ #disk_iops: # default: null
+
+
+# IBM PowerVM
+sap_vm_provision_ibmpowervm_vm_host_specifications_dictionary:
+ example_host_specification_plan:
+ host1: # Hostname, must be 13 characters or less
+ # SMT-8 (i.e. 8 CPU Threads per CPU Core) is used for SAP Software, except for Production systems of SAP HANA on IBM Power10 which uses SMT-4
+ ibmpowervm_vm_cpu_smt: 8
+ ibmpowervm_vm_cpu_threads: 32
+ ibmpowervm_vm_memory_gib: 256
+ #sap_system_type: project_dev # project_dev, project_tst, project_prd
+ sap_host_type: hana_primary # hana_primary, hana_secondary, anydb_primary, anydb_secondary, nwas_ascs, nwas_ers, nwas_pas, nwas_aas
+ storage_definition:
+ - name: data_0
+ mountpoint: /data0
+ disk_count: 1 # default: 1
+ disk_size: 512 # size in GB, integer
+
+
+# KubeVirt
+sap_vm_provision_kubevirt_vm_host_specifications_dictionary:
+ example_host_specification_plan:
+ host1: # Hostname, must be 13 characters or less
+ # SMT-2 (i.e. 2 CPU Threads per CPU Core) is default for Intel CPU Hyper-Threading, optionally can be altered to SMT-1
+ kubevirt_vm_cpu_smt: 2
+ kubevirt_vm_cpu_threads: 32
+ kubevirt_vm_memory_gib: 256
+ #sap_system_type: project_dev # project_dev, project_tst, project_prd
+ sap_host_type: hana_primary # hana_primary, hana_secondary, anydb_primary, anydb_secondary, nwas_ascs, nwas_ers, nwas_pas, nwas_aas
+ storage_definition:
+ - name: data_0
+ mountpoint: /data0
+ disk_count: 1 # default: 1
+ disk_size: 512 # size in GB, integer
+ disk_type: nas # KubeVirt Storage Class
+
+
+# OVirt
+sap_vm_provision_ovirt_vm_boot_menu: false
+sap_vm_provision_ovirt_vm_clone_independent: false
+sap_vm_provision_ovirt_vm_disk_type: "raw" # default is 'cow' = thin provisioning
+sap_vm_provision_ovirt_vm_operating_system: "other_linux"
+sap_vm_provision_ovirt_vm_timezone: "Etc/GMT" # use to define UTC
+
+sap_vm_provision_ovirt_vm_host_specifications_dictionary:
+ example_host_specification_plan:
+ host1: # Hostname, must be 13 characters or less
+ ovirt_vm_cpu_threads: 32
+ ovirt_vm_memory_gib: 256
+ ovirt_vm_type: high_performance
+ ovirt_vm_placement_policy: pinned
+
+ ## optional, needed for kickstart installations when no DHCP is used
+ #ovirt_vm_ip:
+ #ovirt_vm_gw:
+ #ovirt_vm_netmask: 255.255.255.0
+ #ovirt_vm_interface: enp1s0
+
+ #sap_system_type: project_dev # project_dev, project_tst, project_prd
+ sap_host_type: hana_primary # hana_primary, hana_secondary, anydb_primary, anydb_secondary, nwas_ascs, nwas_ers, nwas_pas, nwas_aas
+ storage_definition:
+ - name: data_0
+ mountpoint: /data0
+ disk_count: 1 # default: 1
+ disk_size: 512 # size in GB, integer
+
+# OVirt VM option 1 - create from VM Template Name
+sap_vm_provision_ovirt_vm_template_name: ""
+
+# OVirt VM option 2 - create from Kickstart definition
+# sap_vm_provision_ovirt_kickstart_host: ""
+# sap_vm_provision_ovirt_kickstart_config_file_name: ""
+# sap_vm_provision_ovirt_vm_kickstart_definition:
+# os_image_iso: RHEL-9.2.0-x86_64-dvd.iso
+# # pxeboot path on the hypervisor node(s)
+# initrd_path: /pxeboot/rhel-9.2/initrd.img
+# kernel_path: /pxeboot/rhel-9.2/vmlinuz
+# # see RHEL release documentations for kickstart installation kernel parameters
+# kernel_params: "inst.ks=http://{{ sap_vm_provision_ovirt_kickstart_host }}/kickstart/{{ sap_vm_provision_ovirt_kickstart_config_file_name }} ip={{ sap_vm_provision_ovirt_vm_host_specifications_dictionary[sap_vm_provision_host_specification_plan][inventory_hostname_short].ovirt_vm_ip }}::{{ sap_vm_provision_ovirt_vm_host_specifications_dictionary[sap_vm_provision_host_specification_plan][inventory_hostname_short].ovirt_vm_gw }}:{{ sap_vm_provision_ovirt_vm_host_specifications_dictionary[sap_vm_provision_host_specification_plan][inventory_hostname_short].ovirt_vm_netmask }}:{{ inventory_hostname }}:{{ sap_vm_provision_ovirt_vm_host_specifications_dictionary[sap_vm_provision_host_specification_plan][inventory_hostname_short].ovirt_vm_interface }}:none inst.nompath ipv6.disable=1 inst.repo=cdrom kpti=0"
+# boot_disk:
+# activate: true
+# bootable: true
+# interface: "virtio_scsi"
+# size: "25GiB"
+# format: "cow"
+
+# OVirt VM - vNIC definitions when not using DHCP
+# sap_vm_provision_ovirt_vm_nics:
+# - interface: virtio
+# name: nic1
+# profile_name: vm_nic_profile_name
+
+# OVirt VM - cloud-init
+# sap_vm_provision_ovirt_vm_cloud_init:
+# authorized_ssh_keys: "{{ lookup('file', ( sap_vm_provision_ssh_host_public_key_file_path | default(sap_vm_provision_ssh_host_private_key_file_path + '.pub') ) ) }}"
+# regenerate_ssh_keys: false
+# host_name: "{{ ansible_hostname }}" # Must use short name for SAP Systems, not FQDN
+# custom_script: | # Cloud-init script which will be executed on Virtual Machine when deployed. This is appended to the end of the cloud-init script generated by any other options.
+# write_files:
+# - content: |
+# Hello, world!
+# path: /tmp/greeting.txt
+# permissions: '0644'
+
+
+# VMware VM
+
+# VMware VM - create from VM Template in Content Library
+sap_vm_provision_vmware_vm_content_library_name: ""
+sap_vm_provision_vmware_vm_template_name: ""
+
+sap_vm_provision_vmware_vm_host_specifications_dictionary:
+ example_host_specification_plan:
+ host1: # Hostname, must be 13 characters or less
+ # SMT-2 (i.e. 2 CPU Threads per CPU Core) is default for Intel CPU Hyper-Threading, optionally can be altered to SMT-1
+ vmware_vm_cpu_smt: 2
+ vmware_vm_cpu_threads: 32
+ vmware_vm_memory_gib: 256
+ #sap_system_type: project_dev # project_dev, project_tst, project_prd
+ sap_host_type: hana_primary # hana_primary, hana_secondary, anydb_primary, anydb_secondary, nwas_ascs, nwas_ers, nwas_pas, nwas_aas
+ storage_definition:
+ - name: data_0
+ mountpoint: /data0
+ disk_count: 1 # default: 1
+ disk_size: 512 # size in GB, integer
+ disk_type: scsi # VMware Virtual Disk Node (scsi, ide, sata)
diff --git a/roles/sap_vm_provision/meta/main.yml b/roles/sap_vm_provision/meta/main.yml
new file mode 100644
index 0000000..27b72ab
--- /dev/null
+++ b/roles/sap_vm_provision/meta/main.yml
@@ -0,0 +1,13 @@
+---
+galaxy_info:
+ namespace: community
+ author: Sean Freeman
+ description: SAP VM Provision
+ company: IBM
+ license: Apache-2.0
+ min_ansible_version: 2.12
+ platforms:
+ - name: EL
+ versions: [8, 9]
+ galaxy_tags: ['sap', 'aws', 'gcp', 'msazure', 'ibmcloud', 'ibmpower', 'ovirt', 'kubevirt', 'vmware', 'rhel', 'redhat', 'sles', 'suse']
+dependencies: []
diff --git a/roles/sap_vm_provision/meta/runtime.yml b/roles/sap_vm_provision/meta/runtime.yml
new file mode 100644
index 0000000..c2ea658
--- /dev/null
+++ b/roles/sap_vm_provision/meta/runtime.yml
@@ -0,0 +1,2 @@
+---
+requires_ansible: '>=2.12.0'
diff --git a/roles/sap_vm_provision/tasks/common/register_os.yml b/roles/sap_vm_provision/tasks/common/register_os.yml
new file mode 100644
index 0000000..ce5bd58
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/common/register_os.yml
@@ -0,0 +1,95 @@
+---
+
+#### For On-Premise Package Repo Mirrors ####
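+# Example (hypothetical) variable values for registration against an on-premise package repo mirror:
+# sap_vm_provision_os_registration_ca_file_path: "/tmp/katello-server-ca.crt"
+# sap_vm_provision_os_registration_script_command: "curl -sS http://satellite.example.com/pub/bootstrap.sh | bash"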
+
+- name: Ansible Task block for RHEL Package Repositories setup
+ when:
+ - ansible_os_family == 'RedHat'
+ - sap_vm_provision_os_registration_ca_file_path is defined
+ - sap_vm_provision_os_registration_script_command is defined
+ - not sap_vm_provision_os_online_registration_user is defined
+ - not sap_vm_provision_os_online_registration_passcode is defined
+ block:
+
+ - name: Red Hat Package Repositories - Clean any existing Red Hat Subscription Manager data
+ ansible.builtin.command: /usr/sbin/subscription-manager clean
+
+ - name: Red Hat Package Repositories - Import CA file for Red Hat Satellite server
+ ansible.builtin.copy:
+ src: "{{ sap_vm_provision_os_registration_ca_file_path }}"
+ dest: /etc/pki/ca-trust/source/anchors
+ owner: root
+ group: root
+ mode: '0644'
+
+ - name: Red Hat Package Repositories - Update CA trust
+ ansible.builtin.command: update-ca-trust
+
+ - name: Red Hat Package Repositories - Execute Registration Script to connect host to Red Hat Satellite
+ ansible.builtin.shell: "{{ sap_vm_provision_os_registration_script_command }}" # noqa: command-instead-of-shell
+ changed_when: true
+ # no_log: true
+
+ - name: Red Hat Package Repositories - Cleanup dnf repositories
+ ansible.builtin.command: dnf clean all
+
+
+- name: Ansible Task block for SLES Package Repositories setup
+ when:
+ - ansible_os_family == 'Suse'
+ - sap_vm_provision_os_registration_ca_file_path is defined
+ - sap_vm_provision_os_registration_script_command is defined
+ - not sap_vm_provision_os_online_registration_user is defined
+ - not sap_vm_provision_os_online_registration_passcode is defined
+
+ block:
+
+ - name: SUSE Package Repositories - Import CA file for SUSE RMT server
+ ansible.builtin.copy:
+ src: "{{ sap_vm_provision_os_registration_ca_file_path }}"
+ dest: /etc/pki/trust/anchors
+ owner: root
+ group: root
+ mode: '0644'
+
+ - name: SUSE Package Repositories - Update CA trust
+ ansible.builtin.command: update-ca-certificates
+
+ - name: SUSE Package Repositories - Execute Registration Script to connect host to SUSE RMT
+ ansible.builtin.shell: "{{ sap_vm_provision_os_registration_script_command }}" # noqa: command-instead-of-shell
+ changed_when: true
+ no_log: true
+
+
+#### For Online Registration via SNAT ####
+
+- name: Ansible Task block for RHEL Online Package Repositories setup
+ when:
+ - ansible_os_family == 'RedHat'
+ - not sap_vm_provision_os_registration_ca_file_path is defined
+ - not sap_vm_provision_os_registration_script_command is defined
+ - sap_vm_provision_os_online_registration_user is defined
+ - sap_vm_provision_os_online_registration_passcode is defined
+ block:
+
+ - name: Red Hat Customer Portal (RHCP) Online Package Repositories - Execute
+ ansible.builtin.command: subscription-manager register --auto-attach --username '{{ sap_vm_provision_os_online_registration_user }}' --password '{{ sap_vm_provision_os_online_registration_passcode }}'
+ ignore_errors: true
+
+
+- name: Ansible Task block for SLES Online Package Repositories setup
+ when:
+ - ansible_os_family == 'Suse'
+ - not sap_vm_provision_os_registration_ca_file_path is defined
+ - not sap_vm_provision_os_registration_script_command is defined
+ - sap_vm_provision_os_online_registration_user is defined
+ - sap_vm_provision_os_online_registration_passcode is defined
+ block:
+
+ - name: SUSE Customer Center (SCC) Online Package Repositories - Execute
+ ansible.builtin.command: SUSEConnect --email '{{ sap_vm_provision_os_online_registration_user }}' --regcode '{{ sap_vm_provision_os_online_registration_passcode }}'
+ ignore_errors: true
+ no_log: true
diff --git a/roles/sap_vm_provision/tasks/common/register_proxy.yml b/roles/sap_vm_provision/tasks/common/register_proxy.yml
new file mode 100644
index 0000000..4c89780
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/common/register_proxy.yml
@@ -0,0 +1,67 @@
+---
+
+#### For On-Premise hosts which use Web Forward Proxy instead of SNAT ####
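+# Example (hypothetical) variable values:
+# sap_vm_provision_proxy_web_forward_proxy_ip: "10.0.0.10:3128" # IP:Port of the Web Forward Proxy
+# sap_vm_provision_proxy_web_forward_exclusions: "localhost,127.0.0.1"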
+
+- name: Ansible Task block for Web Forward Proxy setup
+ when:
+ - sap_vm_provision_proxy_web_forward_proxy_ip is defined
+ block:
+
+ - name: Set var for non-interactive login shell on RHEL
+ ansible.builtin.set_fact:
+ non_interactive_login_shell_config_file: /root/.bashrc
+ when: ansible_os_family == "RedHat"
+
+ - name: Set var for non-interactive login shell on SLES
+ ansible.builtin.set_fact:
+ non_interactive_login_shell_config_file: /root/.bashrc
+ when: ansible_os_family == "Suse"
+
+ # Linux Pacemaker uses 'https_proxy' or 'HTTPS_PROXY' for proxy, and 'no_proxy' or 'NO_PROXY' for comma-separated exclusions list.
+ # The insecure HTTP env var 'http_proxy' and 'HTTP_PROXY' are not used.
+ # The exclusions list does not allow wildcard.
+ # Example of no_proxy:
+ ## localhost,127.0.0.1,hana-p,hana-s
+ # Examples of failure no_proxy:
+ ## localhost,127.0.0.1,*.domain.com
+ ## localhost,127.0.0.1,.domain.com
+ ## localhost,127.0.0.1,domain.com
+ ## localhost,127.0.0.1,hana-p.domain.com,hana-s.domain.com
+ ## localhost,127.0.0.1,SUBNET_RANGE_BLOCK
+ ## localhost,127.0.0.1,IP_ADDRESS_HANA_PRIMARY,IP_ADDRESS_HANA_SECONDARY
+
+ - name: Web Forward Proxy - Append all short hostnames to string for no_proxy exclusions list
+ ansible.builtin.set_fact:
+ sap_vm_provision_proxy_web_forward_exclusions_dynamic: "{{ groups_merged_list | join(',') }},{{ groups_merged_list | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | join(',') }}"
+
+ - name: Web Forward Proxy - Ensure non-interactive login shell config file exists
+ ansible.builtin.copy:
+ content: ""
+ dest: "{{ non_interactive_login_shell_config_file }}"
+ mode: '0644'
+ force: false
+
+ # For non-interactive login shell, append proxy env var to /root/.bashrc (proxy will not work if using /etc/bashrc or script stored in /etc/profile.d/)
+ - name: Web Forward Proxy - Append Proxy env var to non-interactive login shell config file
+ ansible.builtin.blockinfile:
+ path: "{{ non_interactive_login_shell_config_file }}"
+ marker: "#-- {mark} ANSIBLE MANAGED BLOCK --#" # must have {mark} otherwise block will repeat append on re-run
+ block: |
+ export http_proxy="{{ sap_vm_provision_proxy_web_forward_proxy_ip }}"
+ export https_proxy="{{ sap_vm_provision_proxy_web_forward_proxy_ip }}"
+ export HTTP_PROXY="{{ sap_vm_provision_proxy_web_forward_proxy_ip }}"
+ export HTTPS_PROXY="{{ sap_vm_provision_proxy_web_forward_proxy_ip }}"
+ #export ftp_proxy
+ #export FTP_PROXY
+ export no_proxy="{{ sap_vm_provision_proxy_web_forward_exclusions }},{{ sap_vm_provision_proxy_web_forward_exclusions_dynamic }}"
+ export NO_PROXY="{{ sap_vm_provision_proxy_web_forward_exclusions }},{{ sap_vm_provision_proxy_web_forward_exclusions_dynamic }}"
+
+ - name: Edit /etc/dnf/dnf.conf to add proxy
+ ansible.builtin.blockinfile:
+ path: /etc/dnf/dnf.conf
+ block: |
+ proxy=http://{{ sap_vm_provision_proxy_web_forward_proxy_ip }}
+ when: ansible_os_family == "RedHat"
diff --git a/roles/sap_vm_provision/tasks/common/set_ansible_vars.yml b/roles/sap_vm_provision/tasks/common/set_ansible_vars.yml
new file mode 100644
index 0000000..db3082f
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/common/set_ansible_vars.yml
@@ -0,0 +1,167 @@
+---
+
+- name: Set facts for all hosts - use facts from localhost - Generic
+ ansible.builtin.set_fact:
+ sap_vm_provision_dns_root_domain: "{{ sap_vm_provision_dns_root_domain }}"
+ sap_vm_provision_host_specification_plan: "{{ sap_vm_provision_host_specification_plan }}"
+ sap_vm_provision_nfs_mount_point: "{{ sap_vm_provision_nfs_mount_point | default('') }}"
+ sap_vm_provision_nfs_mount_point_separate_sap_transport_dir: "{{ sap_vm_provision_nfs_mount_point_separate_sap_transport_dir | default('') }}"
+ sap_id_user: "{{ sap_id_user }}"
+ sap_id_user_password: "{{ sap_id_user_password }}"
+ sap_software_download_directory: "{{ sap_software_download_directory }}"
+ sap_install_media_detect_source_directory: "{{ sap_software_download_directory }}"
+
+- name: Set facts for all hosts - use facts from localhost - Ansible only
+ ansible.builtin.set_fact:
+ sap_vm_provision_ssh_host_private_key_file_path: "{{ sap_vm_provision_ssh_host_private_key_file_path }}"
+ when:
+ - sap_vm_provision_iac_type == "ansible"
+
+- name: Set facts for all hosts - use facts from localhost - SAP HANA
+ ansible.builtin.set_fact:
+ sap_hana_sid: "{{ sap_hana_sid | default(sap_system_hana_db_sid) }}"
+ sap_hana_install_instance_nr: "{{ sap_hana_install_instance_nr | default(sap_system_hana_db_instance_nr) }}"
+ sap_hana_install_use_master_password: "y"
+ sap_hana_install_master_password: "{{ sap_hana_install_master_password }}"
+ sap_hana_install_software_directory: "{{ sap_software_download_directory }}"
+ when:
+ - (sap_hana_sid is defined or sap_system_hana_db_sid is defined) or (hostvars[inventory_hostname].vars['sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary'][sap_vm_provision_host_specification_plan][inventory_hostname].sap_system_hana_db_sid is defined)
+
+
+- name: Set facts for all hosts - use facts from localhost - SAP SWPM
+ ansible.builtin.set_fact:
+ sap_swpm_sid: "{{ sap_swpm_sid | default(sap_system_sid) | default('') }}"
+ sap_maintenance_planner_transaction_name: "{{ sap_maintenance_planner_transaction_name | default('') }}"
+ sap_swpm_templates_product_input: "{{ sap_swpm_templates_product_input | default('') }}"
+ sap_swpm_templates_product_input_prefix: "{{ sap_swpm_templates_product_input_prefix | default('') }}"
+ sap_swpm_ascs_instance_nr: "{{ sap_swpm_ascs_instance_nr | default(sap_system_nwas_abap_ascs_instance_nr) | default('') }}"
+ sap_swpm_pas_instance_nr: "{{ sap_swpm_pas_instance_nr | default(sap_system_nwas_abap_pas_instance_nr) | default('') }}"
+ sap_swpm_db_sid: "{{ sap_swpm_db_sid | default(sap_system_hana_db_sid) | default('') }}"
+ sap_swpm_db_instance_nr: "{{ sap_swpm_db_instance_nr | default(sap_system_hana_db_instance_nr) | default('') }}"
+ when:
+ - (sap_swpm_sid is defined or sap_system_sid is defined) or (hostvars[inventory_hostname].vars['sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary'][sap_vm_provision_host_specification_plan][inventory_hostname].sap_system_sid is defined)
+ - (sap_swpm_templates_install_dictionary is defined or sap_system_sid is defined) or hostvars[inventory_hostname].sap_swpm_templates_install_dictionary is defined
+
+
+- name: Set facts for all hosts - use facts from localhost - HA/DR - AWS
+ ansible.builtin.set_fact:
+ sap_ha_pacemaker_cluster_aws_region: "{{ sap_ha_pacemaker_cluster_aws_region }}"
+ sap_ha_pacemaker_cluster_aws_access_key_id: "{{ sap_ha_pacemaker_cluster_aws_access_key_id }}"
+ sap_ha_pacemaker_cluster_aws_secret_access_key: "{{ sap_ha_pacemaker_cluster_aws_secret_access_key }}"
+ sap_ha_pacemaker_cluster_aws_vip_update_rt: "{{ aws_vpc_subnet_rt_info.route_tables[0].route_table_id }}"
+ when:
+ - sap_ha_pacemaker_cluster_aws_region is defined
+ - sap_vm_provision_iac_platform == "aws_ec2_vs"
+
+# - name: Set facts for all hosts - use facts from localhost - HA/DR - GCP
+# ansible.builtin.set_fact:
+# when:
+# - sap_ha_pacemaker_cluster_gcp_region_zone is defined
+# - sap_vm_provision_iac_type == "ansible"
+# - sap_vm_provision_iac_platform == "gcp_ce_vm"
+
+- name: Set facts for all hosts - use facts from localhost - HA/DR - IBM Cloud, IBM Power VS
+ ansible.builtin.set_fact:
+ sap_ha_pacemaker_cluster_ibmcloud_powervs_workspace_crn: "{{ register_ibmcloud_power_iaas_workspace_service_instance.resource.crn }}"
+ sap_ha_pacemaker_cluster_ibmcloud_api_key: "{{ sap_ha_pacemaker_cluster_ibmcloud_api_key }}"
+ sap_ha_pacemaker_cluster_ibmcloud_region: "{{ list_ibmcloud_powervs_location_to_powervs_region[sap_vm_provision_ibmcloud_powervs_location] }}" # Lookup IBM Power VS Region from the given IBM Power VS Location during sap_vm_provision execution
+ when:
+ - sap_ha_pacemaker_cluster_ibmcloud_api_key is defined
+ - sap_vm_provision_iac_platform == "ibmcloud_powervs"
+
+- name: Set facts for all hosts - use facts from localhost - HA/DR - IBM Cloud
+ ansible.builtin.set_fact:
+ sap_ha_pacemaker_cluster_ibmcloud_api_key: "{{ sap_ha_pacemaker_cluster_ibmcloud_api_key }}"
+ sap_ha_pacemaker_cluster_ibmcloud_region: "{{ sap_ha_pacemaker_cluster_ibmcloud_region }}"
+ when:
+ - sap_ha_pacemaker_cluster_ibmcloud_region is defined
+ - sap_vm_provision_iac_platform == "ibmcloud_vs"
+
+# - name: Set facts for all hosts - use facts from localhost - HA/DR - MS Azure
+# ansible.builtin.set_fact:
+# when:
+# - sap_ha_pacemaker_cluster_msazure_resource_group is defined
+# - sap_vm_provision_iac_type == "ansible"
+# - sap_vm_provision_iac_platform == "msazure_vm"
+
+# - name: Set facts for all hosts - use facts from localhost - HA/DR - IBM PowerVM
+# ansible.builtin.set_fact:
+# when:
+# - sap_ha_pacemaker_cluster_ibmpower_vm_hmc_host is defined
+# - sap_vm_provision_iac_type == "ansible"
+# - sap_vm_provision_iac_platform == "ibmpowervm_vm"
+
+# - name: Set facts for all hosts - use facts from localhost - HA/DR - KubeVirt
+# ansible.builtin.set_fact:
+# when:
+# - sap_ha_pacemaker_cluster___ is defined
+# - sap_vm_provision_iac_type == "ansible"
+# - sap_vm_provision_iac_platform == "kubevirt_vm"
+
+# - name: Set facts for all hosts - use facts from localhost - HA/DR - OVirt
+# ansible.builtin.set_fact:
+# when:
+# - sap_ha_pacemaker_cluster_aws_region is defined
+# - sap_vm_provision_iac_type == "ansible"
+# - sap_vm_provision_iac_platform == "ovirt_vm"
+
+# - name: Set facts for all hosts - use facts from localhost - HA/DR - VMware
+# ansible.builtin.set_fact:
+# when:
+# - sap_ha_pacemaker_cluster_aws_region is defined
+# - sap_vm_provision_iac_type == "ansible"
+# - sap_vm_provision_iac_platform == "vmware_vm"
+
+- name: Set facts for all hosts - IBM Power (ppc64le) only
+ ansible.builtin.set_fact:
+ sap_storage_setup_multipath_enable_and_detect: true
+ when:
+ - sap_vm_provision_iac_platform == "ibmpowervm_vm" or sap_vm_provision_iac_platform == "ibmcloud_powervs"
+
+
+# Required when defining Ansible Role variables within the host_specifications_dictionary for multiple SAP Systems / SAP Landscapes
+- name: Set facts for all hosts - use facts from localhost - SAP Variables from host_specifications_dictionary
+ ansible.builtin.set_fact:
+ "{{ host_spec_sap_item }}": "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname][host_spec_sap_item] }}"
+ loop: "{{ vars['sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary'][sap_vm_provision_host_specification_plan][inventory_hostname].keys() | map('regex_findall', '^sap_.*') | flatten | select() | list }}"
+ loop_control:
+ loop_var: host_spec_sap_item
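+# e.g. a key 'sap_system_sid' defined under the host entry in the host specifications dictionary
+# is promoted to a host-level Ansible fact named 'sap_system_sid'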
+
+- name: Set facts for all hosts - use facts from localhost - Host Specifications Dictionary
+ ansible.builtin.set_fact:
+ host_specifications_dictionary: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary') }}"
+
+# Set default to none; the var will not be set if the group does not exist
+- name: Set facts for all hosts - sap_vm_provision_dynamic_inventory_* hostname variables to identify hosts for other Ansible Roles
+ ansible.builtin.set_fact:
+ sap_vm_provision_dynamic_inventory_anydb_primary_hostname: "{{ hostvars[inventory_hostname].groups.anydb_primary[0] | default(none) }}"
+ sap_vm_provision_dynamic_inventory_anydb_secondary_hostname: "{{ hostvars[inventory_hostname].groups.anydb_secondary[0] | default(none) }}"
+
+ sap_vm_provision_dynamic_inventory_hana_primary_hostname: "{{ hostvars[inventory_hostname].groups.hana_primary[0] | default(none) }}"
+ sap_vm_provision_dynamic_inventory_hana_secondary_hostname: "{{ hostvars[inventory_hostname].groups.hana_secondary[0] | default(none) }}"
+
+ sap_vm_provision_dynamic_inventory_nw_ascs_hostname: "{{ hostvars[inventory_hostname].groups.nwas_ascs[0] | default(none) }}"
+ sap_vm_provision_dynamic_inventory_nw_ers_hostname: "{{ hostvars[inventory_hostname].groups.nwas_ers[0] | default(none) }}"
+
+ sap_vm_provision_dynamic_inventory_nw_pas_hostname: "{{ hostvars[inventory_hostname].groups.nwas_pas[0] | default(none) }}"
+
+ sap_vm_provision_dynamic_inventory_nw_aas_hostname: "{{ hostvars[inventory_hostname].groups.nwas_aas[0] | default(none) }}"
+
+
+# Set default to none; the var will not be set if the group does not exist
+- name: Set facts for all hosts - sap_vm_provision_dynamic_inventory_* IP Address variables to identify hosts for other Ansible Roles
+ ansible.builtin.set_fact:
+ sap_vm_provision_dynamic_inventory_anydb_primary_ip: "{{ hostvars[sap_vm_provision_dynamic_inventory_anydb_primary_hostname].ansible_host | default(none) }}"
+ sap_vm_provision_dynamic_inventory_anydb_secondary_ip: "{{ hostvars[sap_vm_provision_dynamic_inventory_anydb_secondary_hostname].ansible_host | default(none) }}"
+
+ sap_vm_provision_dynamic_inventory_hana_primary_ip: "{{ hostvars[sap_vm_provision_dynamic_inventory_hana_primary_hostname].ansible_host | default(none) }}"
+ sap_vm_provision_dynamic_inventory_hana_secondary_ip: "{{ hostvars[sap_vm_provision_dynamic_inventory_hana_secondary_hostname].ansible_host | default(none) }}"
+
+ sap_vm_provision_dynamic_inventory_nw_ascs_ip: "{{ hostvars[sap_vm_provision_dynamic_inventory_nw_ascs_hostname].ansible_host | default(none) }}"
+ sap_vm_provision_dynamic_inventory_nw_ers_ip: "{{ hostvars[sap_vm_provision_dynamic_inventory_nw_ers_hostname].ansible_host | default(none) }}"
+
+ sap_vm_provision_dynamic_inventory_nw_pas_ip: "{{ hostvars[sap_vm_provision_dynamic_inventory_nw_pas_hostname].ansible_host | default(none) }}"
+
+ sap_vm_provision_dynamic_inventory_nw_aas_ip: "{{ hostvars[sap_vm_provision_dynamic_inventory_nw_aas_hostname].ansible_host | default(none) }}"
diff --git a/roles/sap_vm_provision/tasks/common/set_ansible_vars_storage.yml b/roles/sap_vm_provision/tasks/common/set_ansible_vars_storage.yml
new file mode 100644
index 0000000..d1f0393
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/common/set_ansible_vars_storage.yml
@@ -0,0 +1,45 @@
+---
+
+# When SAP HANA Scale-Out is used, if host name is not in original specifications then strip suffix node number from host name
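+# e.g. inventory host 'hana-scaleout2' resolves to the origin specification key 'hana-scaleout'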
+- name: Set fact for storage setup when performing SAP HANA Scale-Out
+ ansible.builtin.set_fact:
+ host_node_scaleout_origin_spec: "{{ ansible_hostname | regex_replace('^(.+?)\\d*$', '\\1') }}"
+ when:
+ - sap_hana_scaleout_active_coordinator is defined
+ - not inventory_hostname in vars['sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary'][sap_vm_provision_host_specification_plan].keys()
+
+# Use inventory_hostname_short to retrieve the host specification from the dictionary. While ansible_hostname works for Ansible-only execution, execution via Ansible>Terraform may see ansible_hostname as 'localhost' and fail
+# For end user ease of use, the host specifications dictionary uses disk_count to indicate how many disks will be provisioned
+# However the sap_storage_setup Ansible Role cannot detect disk_count, and requires the key to be renamed to lvm_lv_stripes
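+# e.g. an element {'name': 'data_0', 'mountpoint': '/data0', 'disk_count': 2, 'disk_size': 512, 'disk_type': 'gp3'}
+# is converted to {'name': 'data_0', 'mountpoint': '/data0', 'lvm_lv_stripes': 2, 'disk_size': 512}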
+- name: Convert sap_vm_provision_*_host_specifications_dictionary.storage_definition to sap_storage_setup.sap_storage_setup_definition
+ ansible.builtin.set_fact:
+ sap_storage_setup_definition: "{{ sap_storage_setup_definition | default([]) + [converted_element] }}"
+ vars:
+ converted_element: |
+ {% set current_element = (convert_item | dict2items) %}
+ {% set new_element = [] %}
+ {% for entry in current_element %}
+ {%- if "disk_count" in entry.key %}
+ {%- set conv = new_element.extend([
+ {
+ 'key': 'lvm_lv_stripes',
+ 'value': entry.value,
+ }
+ ]) %}
+ {%- elif not "disk_type" in entry.key %}
+ {%- set add_entry = new_element.extend([
+ {
+ 'key': entry.key,
+ 'value': entry.value,
+ }
+ ]) %}
+ {%- endif -%}
+ {% endfor %}
+ {{ new_element | items2dict }}
+ loop: "{{ host_specifications_dictionary[sap_vm_provision_host_specification_plan][host_node_scaleout_origin_spec | default(inventory_hostname_short)].storage_definition | list }}"
+ loop_control:
+ loop_var: convert_item
+ label: "{{ convert_item.name }}"
diff --git a/roles/sap_vm_provision/tasks/common/set_etc_hosts.yml b/roles/sap_vm_provision/tasks/common/set_etc_hosts.yml
new file mode 100644
index 0000000..5b64f03
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/common/set_etc_hosts.yml
@@ -0,0 +1,98 @@
+---
+
+# Ensure SAP AnyDB, SAP HANA or SAP NetWeaver hostname is not localhost in /etc/hosts. See SAP Note 1054467 - Local host name refers to loopback address
+
+- name: Ansible Task block for updating /etc/hosts file before SAP software installations
+ block:
+
+ # First remove entries of the host
+ # The separate removal task allows cleanup of multiple lines
+ - name: Clean old host information from /etc/hosts
+ ansible.builtin.lineinfile:
+ path: /etc/hosts
+ regexp: '^({{ ansible_host | regex_escape }}\s+)'
+ state: absent
+
+ - name: Add new entry to /etc/hosts and use /etc/hosts to set domain name of host
+ ansible.builtin.lineinfile:
+ path: /etc/hosts
+ line: "{{ ansible_host }}\t{{ inventory_hostname_short }}.{{ sap_vm_provision_dns_root_domain }}\t{{ inventory_hostname_short }}"
+
+ - name: Add new entry to /etc/hosts and use /etc/hosts to set domain name of host - IBM Power
+ ansible.builtin.lineinfile:
+ path: /etc/hosts
+ line: "{{ ansible_default_ipv4.address }}\t{{ inventory_hostname_short }}.{{ sap_vm_provision_dns_root_domain }}\t{{ inventory_hostname_short }}"
+ when: sap_vm_provision_iac_platform == "ibmpowervm_vm" or sap_vm_provision_iac_platform == "ovirt"
+
+ # Required to collect the remote host's facts for further processing
+ # in the following steps and activate Ansible Special Variables
+ # such as ansible_domain and ansible_fqdn
+ - name: Gather host facts
+ ansible.builtin.setup:
+
+
+ - name: Update /etc/hosts file when single sandbox host (hana_primary)
+ ansible.builtin.lineinfile:
+ dest: /etc/hosts
+ line: "{{ sap_vm_provision_dynamic_inventory_hana_primary_ip }}\t{{ sap_vm_provision_dynamic_inventory_hana_primary_hostname }}.{{ sap_vm_provision_dns_root_domain }}\t{{ sap_vm_provision_dynamic_inventory_hana_primary_hostname }}"
+ state: present
+ when:
+ - groups["hana_primary"] is defined and inventory_hostname_short in groups['hana_primary']
+ - not (ansible_play_hosts_all | length) > 1
+
+ - name: Update /etc/hosts file when single sandbox host (nwas_pas)
+ ansible.builtin.lineinfile:
+ dest: /etc/hosts
+ line: "{{ sap_vm_provision_dynamic_inventory_nw_pas_ip }}\t{{ sap_vm_provision_dynamic_inventory_nw_pas_hostname }}.{{ sap_vm_provision_dns_root_domain }}\t{{ sap_vm_provision_dynamic_inventory_nw_pas_hostname }}"
+ when:
+ - groups["nwas_pas"] is defined and inventory_hostname_short in groups['nwas_pas']
+ - not (ansible_play_hosts_all | length) > 1
+
+
+ - name: Update /etc/hosts file for SAP HANA
+ ansible.builtin.lineinfile:
+ dest: /etc/hosts
+ line: "{{ sap_vm_provision_dynamic_inventory_hana_primary_ip }}\t{{ sap_vm_provision_dynamic_inventory_hana_primary_hostname }}.{{ sap_vm_provision_dns_root_domain }}\t{{ sap_vm_provision_dynamic_inventory_hana_primary_hostname }}"
+ state: present
+ when:
+ - (groups["hana_primary"] is defined and (groups["hana_primary"] | length>0))
+ - (ansible_play_hosts_all | length) > 1
+
+
+ - name: Update /etc/hosts file for SAP AnyDB
+ ansible.builtin.lineinfile:
+ dest: /etc/hosts
+ line: "{{ sap_vm_provision_dynamic_inventory_anydb_primary_ip }}\t{{ sap_vm_provision_dynamic_inventory_anydb_primary_hostname }}.{{ sap_vm_provision_dns_root_domain }}\t{{ sap_vm_provision_dynamic_inventory_anydb_primary_hostname }}"
+ state: present
+ when:
+ - (groups["anydb_primary"] is defined and (groups["anydb_primary"] | length>0))
+ - (ansible_play_hosts_all | length) > 1
+
+
+ - name: Update /etc/hosts file for SAP NetWeaver ASCS
+ ansible.builtin.lineinfile:
+ dest: /etc/hosts
+ line: "{{ sap_vm_provision_dynamic_inventory_nw_ascs_ip }}\t{{ sap_vm_provision_dynamic_inventory_nw_ascs_hostname }}.{{ sap_vm_provision_dns_root_domain }}\t{{ sap_vm_provision_dynamic_inventory_nw_ascs_hostname }}"
+ state: present
+ when:
+ - (groups["nwas_ascs"] is defined and (groups["nwas_ascs"] | length>0))
+ - (ansible_play_hosts_all | length) > 1
+
+ - name: Update /etc/hosts file for SAP NetWeaver PAS
+ ansible.builtin.lineinfile:
+ dest: /etc/hosts
+ line: "{{ sap_vm_provision_dynamic_inventory_nw_pas_ip }}\t{{ sap_vm_provision_dynamic_inventory_nw_pas_hostname }}.{{ sap_vm_provision_dns_root_domain }}\t{{ sap_vm_provision_dynamic_inventory_nw_pas_hostname }}"
+ state: present
+ when:
+ - (groups["nwas_pas"] is defined and (groups["nwas_pas"] | length>0))
+ - (ansible_play_hosts_all | length) > 1
+
+ - name: Update /etc/hosts file for SAP NetWeaver AAS
+ ansible.builtin.lineinfile:
+ dest: /etc/hosts
+ line: "{{ sap_vm_provision_dynamic_inventory_nw_aas_ip }}\t{{ sap_vm_provision_dynamic_inventory_nw_aas_hostname }}.{{ sap_vm_provision_dns_root_domain }}\t{{ sap_vm_provision_dynamic_inventory_nw_aas_hostname }}"
+ state: present
+ when:
+ - (groups["nwas_aas"] is defined and (groups["nwas_aas"] | length>0))
+ - (ansible_play_hosts_all | length) > 1
diff --git a/roles/sap_vm_provision/tasks/common/set_etc_hosts_ha.yml b/roles/sap_vm_provision/tasks/common/set_etc_hosts_ha.yml
new file mode 100644
index 0000000..f87271a
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/common/set_etc_hosts_ha.yml
@@ -0,0 +1,161 @@
+---
+
+# Ensure SAP AnyDB, SAP HANA or SAP NetWeaver hostname is not localhost in /etc/hosts. See SAP Note 1054467 - Local host name refers to loopback address
+
+- name: Ansible Task block for controlling execution to an Infrastructure Platform when High Availability is used
+ when:
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+ block:
+
+ # Required to collect the remote host's facts for further processing
+ # in the following steps and activate Ansible Special Variables
+ # such as ansible_domain and ansible_fqdn
+ - name: Gather host facts
+ ansible.builtin.setup:
+
+
+ - name: Update /etc/hosts file for SAP HANA Secondary node
+ ansible.builtin.lineinfile:
+ dest: /etc/hosts
+ line: "{{ sap_vm_provision_dynamic_inventory_hana_secondary_ip }}\t{{ sap_vm_provision_dynamic_inventory_hana_secondary_hostname }}.{{ ansible_domain }}\t{{ sap_vm_provision_dynamic_inventory_hana_secondary_hostname }}"
+ state: present
+ when:
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0))
+
+ - name: Update /etc/hosts file for SAP NetWeaver ERS
+ ansible.builtin.lineinfile:
+ dest: /etc/hosts
+ line: "{{ sap_vm_provision_dynamic_inventory_nw_ers_ip }}\t{{ sap_vm_provision_dynamic_inventory_nw_ers_hostname }}.{{ ansible_domain }}\t{{ sap_vm_provision_dynamic_inventory_nw_ers_hostname }}"
+ state: present
+ when:
+ - (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0))
+
+
+ - name: Update /etc/hosts file for SAP HANA HA
+ ansible.builtin.lineinfile:
+ dest: /etc/hosts
+ line: "{{ item }}"
+ state: present
+ loop:
+ - "{{ sap_vm_provision_dynamic_inventory_hana_primary_ip }}\t{{ sap_vm_provision_dynamic_inventory_hana_primary_hostname }}.{{ ansible_domain }}\t{{ sap_vm_provision_dynamic_inventory_hana_primary_hostname }}"
+ - "{{ sap_vm_provision_dynamic_inventory_hana_secondary_ip }}\t{{ sap_vm_provision_dynamic_inventory_hana_secondary_hostname }}.{{ ansible_domain }}\t{{ sap_vm_provision_dynamic_inventory_hana_secondary_hostname }}"
+ when:
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0))
+
+ - name: Update /etc/hosts file with Virtual IPs for SAP HANA HA
+ ansible.builtin.lineinfile:
+ dest: /etc/hosts
+ line: "{{ item }}"
+ state: present
+ loop:
+ - "{{ (sap_ha_pacemaker_cluster_vip_hana_primary_ip_address | default('192.168.1.90/32')) | regex_replace('/.*', '') }}\t{{ sap_swpm_db_host }}.{{ ansible_domain }}\t{{ sap_swpm_db_host }}"
+ when:
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0))
+ - (not ansible_product_name == "Google Compute Engine" and not ansible_chassis_vendor == "Microsoft Corporation" and not ansible_chassis_asset_tag == 'ibmcloud') or ( (ansible_product_name == "Google Compute Engine" or ansible_chassis_vendor == "Microsoft Corporation" or ansible_chassis_asset_tag == 'ibmcloud') and (not inventory_hostname in groups["hana_primary"] or not inventory_hostname in groups["hana_secondary"]) )
+
+
+ - name: Update /etc/hosts file for SAP AnyDB HA
+ ansible.builtin.lineinfile:
+ dest: /etc/hosts
+ line: "{{ item }}"
+ state: present
+ loop:
+ - "{{ sap_vm_provision_dynamic_inventory_anydb_primary_ip }}\t{{ sap_vm_provision_dynamic_inventory_anydb_primary_hostname }}.{{ ansible_domain }}\t{{ sap_vm_provision_dynamic_inventory_anydb_primary_hostname }}"
+ - "{{ sap_vm_provision_dynamic_inventory_anydb_secondary_ip }}\t{{ sap_vm_provision_dynamic_inventory_anydb_secondary_hostname }}.{{ ansible_domain }}\t{{ sap_vm_provision_dynamic_inventory_anydb_secondary_hostname }}"
+ when:
+ - (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+
+ - name: Update /etc/hosts file with Virtual IPs for SAP AnyDB HA
+ ansible.builtin.lineinfile:
+ dest: /etc/hosts
+ line: "{{ item }}"
+ state: present
+ loop:
+ - "{{ (sap_vm_temp_vip_anydb_primary | default('192.168.1.90/32')) | regex_replace('/.*', '') }}\t{{ sap_swpm_db_host }}.{{ ansible_domain }}\t{{ sap_swpm_db_host }}"
+ when:
+ - (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+
+
+ - name: Update /etc/hosts file for SAP NetWeaver HA
+ ansible.builtin.lineinfile:
+ dest: /etc/hosts
+ line: "{{ item }}"
+ state: present
+ loop:
+ - "{{ sap_vm_provision_dynamic_inventory_nw_ascs_ip }}\t{{ sap_vm_provision_dynamic_inventory_nw_ascs_hostname }}.{{ ansible_domain }}\t{{ sap_vm_provision_dynamic_inventory_nw_ascs_hostname }}"
+ - "{{ sap_vm_provision_dynamic_inventory_nw_ers_ip }}\t{{ sap_vm_provision_dynamic_inventory_nw_ers_hostname }}.{{ ansible_domain }}\t{{ sap_vm_provision_dynamic_inventory_nw_ers_hostname }}"
+ - "{{ sap_vm_provision_dynamic_inventory_nw_pas_ip }}\t{{ sap_vm_provision_dynamic_inventory_nw_pas_hostname }}.{{ ansible_domain }}\t{{ sap_vm_provision_dynamic_inventory_nw_pas_hostname }}"
+ when:
+ - (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0))
+
+ - name: Update /etc/hosts file with Virtual IPs for SAP NetWeaver HA - ASCS / ERS
+ ansible.builtin.lineinfile:
+ dest: /etc/hosts
+ line: "{{ item }}"
+ state: present
+ loop:
+ - "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | default('192.168.2.10/32')) | regex_replace('/.*', '') }}\t{{ sap_swpm_ascs_instance_hostname }}.{{ ansible_domain }}\t{{ sap_swpm_ascs_instance_hostname }}"
+ - "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | default('192.168.2.11/32')) | regex_replace('/.*', '') }}\t{{ sap_swpm_ers_instance_hostname }}.{{ ansible_domain }}\t{{ sap_swpm_ers_instance_hostname }}"
+ when:
+ - (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0))
+ - not ansible_chassis_asset_tag == 'ibmcloud' or ((not inventory_hostname_short in groups['nwas_ascs'] and not inventory_hostname_short in groups['nwas_ers']) and ansible_chassis_asset_tag == 'ibmcloud')
+
+ # - name: Update /etc/hosts file with Virtual IPs for SAP NetWeaver HA - PAS / AAS
+ # ansible.builtin.lineinfile:
+ # dest: /etc/hosts
+ # line: "{{ item }}"
+ # state: present
+ # loop:
+ # - "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_pas_ip_address | default('192.168.2.12/32')) | regex_replace('/.*', '') }}\t{{ sap_swpm_pas_instance_hostname }}.{{ ansible_domain }}\t{{ sap_swpm_pas_instance_hostname }}"
+ # - "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_aas_ip_address | default('192.168.2.13/32')) | regex_replace('/.*', '') }}\t{{ .sap_swpm_aas_instance_hostname }}.{{ ansible_domain }}\t{{ sap_swpm_aas_instance_hostname }}"
+ # when:
+ # - (groups["nwas_aas"] is defined and (groups["nwas_aas"] | length>0))
+ # - not ansible_chassis_asset_tag == 'ibmcloud' or ((not inventory_hostname_short in groups['nwas_pas'] and not inventory_hostname_short in groups['nwas_aas']) and ansible_chassis_asset_tag == 'ibmcloud')
+
+ - name: Update /etc/hosts file for SAP NetWeaver AAS
+ ansible.builtin.lineinfile:
+ dest: /etc/hosts
+ line: "{{ item }}"
+ state: present
+ loop:
+ - "{{ sap_vm_provision_dynamic_inventory_nw_aas_ip }}\t{{ sap_vm_provision_dynamic_inventory_nw_aas_hostname }}.{{ ansible_domain }}\t{{ sap_vm_provision_dynamic_inventory_nw_aas_hostname }}"
+ when:
+ - (groups["nwas_aas"] is defined and (groups["nwas_aas"] | length>0))
+
+
+# Ensure SAP AnyDB, SAP HANA or SAP NetWeaver hostname is not localhost in /etc/hosts. See SAP Note 1054467 - Local host name refers to loopback address.
+# However, the IBM Cloud Load Balancer is a secure design which uses Back-end Pool servers and a Front-end Listener, each with a single Port Number,
+# and the Virtual IP is controlled by the Load Balancer. The Virtual IP is therefore not added as a Secondary IP to the OS Network Interface,
+# which causes connectivity issues because SAP NetWeaver instances use random dynamic ports.
+# As a workaround, configure /etc/hosts to map the Virtual Hostname to the host IP Address instead of the Virtual IP Address
+- name: Ansible Task block for controlling execution to an Infrastructure Platform when High Availability is used - IBM Cloud
+ when:
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+ - ansible_chassis_asset_tag == 'ibmcloud'
+ block:
+
+ - name: Update /etc/hosts file with Virtual Hostname for SAP NetWeaver HA ASCS on IBM Cloud
+ ansible.builtin.lineinfile:
+ dest: /etc/hosts
+ line: "{{ item }}"
+ state: present
+ loop:
+ - "{{ sap_vm_provision_dynamic_inventory_nw_ascs_ip }}\t{{ sap_swpm_ascs_instance_hostname }}.{{ ansible_domain }}\t{{ sap_swpm_ascs_instance_hostname }}"
+ - "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | default('192.168.2.11/32')) | regex_replace('/.*', '') }}\t{{ sap_swpm_ers_instance_hostname }}.{{ ansible_domain }}\t{{ sap_swpm_ers_instance_hostname }}"
+ when:
+ - (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0))
+ - ansible_chassis_asset_tag == 'ibmcloud'
+ - inventory_hostname_short in groups['nwas_ascs']
+
+ - name: Update /etc/hosts file with Virtual Hostname for SAP NetWeaver HA ERS on IBM Cloud
+ ansible.builtin.lineinfile:
+ dest: /etc/hosts
+ line: "{{ item }}"
+ state: present
+ loop:
+ - "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | default('192.168.2.10/32')) | regex_replace('/.*', '') }}\t{{ sap_swpm_ascs_instance_hostname }}.{{ ansible_domain }}\t{{ sap_swpm_ascs_instance_hostname }}"
+ - "{{ sap_vm_provision_dynamic_inventory_nw_ers_ip }}\t{{ sap_swpm_ers_instance_hostname }}.{{ ansible_domain }}\t{{ sap_swpm_ers_instance_hostname }}"
+ when:
+ - (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0))
+ - ansible_chassis_asset_tag == 'ibmcloud'
+ - inventory_hostname_short in groups['nwas_ers']
diff --git a/roles/sap_vm_provision/tasks/common/set_etc_hosts_scaleout.yml b/roles/sap_vm_provision/tasks/common/set_etc_hosts_scaleout.yml
new file mode 100644
index 0000000..506259d
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/common/set_etc_hosts_scaleout.yml
@@ -0,0 +1,62 @@
+---
+
+# Ensure SAP AnyDB, SAP HANA or SAP NetWeaver hostname is not localhost in /etc/hosts. See SAP Note 1054467 - Local host name refers to loopback address
+
+- name: Ansible Task block for controlling execution to an Infrastructure Platform when SAP HANA Scale-Out is used
+ when:
+ - (groups["hana_primary"] is defined and (groups["hana_primary"] | length>0)) and (sap_hana_scaleout_active_coordinator is defined or sap_hana_scaleout_active_worker is defined or sap_hana_scaleout_standby is defined)
+ block:
+
+ # Required to collect the remote host's facts for further processing
+ # in the following steps and activate Ansible Special Variables
+ # such as ansible_domain and ansible_fqdn
+ - name: Gather host facts
+ ansible.builtin.setup:
+
+
+ - name: Update /etc/hosts file for SAP HANA Scale-Out Active Parent
+ ansible.builtin.lineinfile:
+ dest: /etc/hosts
+ line: "{{ item }}"
+ state: present
+ loop:
+ - "# SAP HANA scale-out parent\n{{ hostvars[(groups['hana_primary'] | first)]['ansible_host'] }}\t{{ hostvars[(groups['hana_primary'] | first)]['ansible_fqdn'] }}\t{{ hostvars[(groups['hana_primary'] | first)]['inventory_hostname_short'] }}"
+ loop_control:
+ label: "{{ inventory_hostname_short }}"
+ when: not inventory_hostname_short == hostvars[(groups['hana_primary'] | first)]['inventory_hostname_short']
+
+ - name: Update /etc/hosts file for SAP HANA Scale-Out Active Workers (no Standby)
+ ansible.builtin.lineinfile:
+ dest: /etc/hosts
+ line: "{{ item }}"
+ state: present
+ loop:
+ - "# SAP HANA scale-out workers\n{% for host in (groups['hana_primary'] | reject('search', '0') | list) %}{% if (host != inventory_hostname_short) %}{{ hostvars[host]['ansible_host'] }}\t{{ hostvars[host]['ansible_fqdn'] }}\t{{ hostvars[host]['inventory_hostname_short'] }}\n{% endif %}{% endfor %}"
+ loop_control:
+ label: "{{ inventory_hostname_short }}"
+ when:
+ - (sap_hana_scaleout_standby is not defined) or (sap_hana_scaleout_standby == 0)
+
+ - name: Update /etc/hosts file for SAP HANA Scale-Out Active Workers (with Standby)
+ ansible.builtin.lineinfile:
+ dest: /etc/hosts
+ line: "{{ item }}"
+ state: present
+ loop:
+ - "# SAP HANA scale-out workers\n{% for host in (groups['hana_primary'] | reject('search', '0') | list)[:-1] %}{% if (host != inventory_hostname_short) %}{{ hostvars[host]['ansible_host'] }}\t{{ hostvars[host]['ansible_fqdn'] }}\t{{ hostvars[host]['inventory_hostname_short'] }}\n{% endif %}{% endfor %}"
+ loop_control:
+ label: "{{ inventory_hostname_short }}"
+ when: sap_hana_scaleout_standby is defined and sap_hana_scaleout_standby > 0
+
+ - name: Update /etc/hosts file for SAP HANA Scale-Out Standby
+ ansible.builtin.lineinfile:
+ dest: /etc/hosts
+ line: "{{ item }}"
+ state: present
+ loop:
+ - "# SAP HANA scale-out standby\n{{ hostvars[(groups['hana_primary'][-1])]['ansible_host'] }}\t{{ hostvars[(groups['hana_primary'][-1])]['ansible_fqdn'] }}\t{{ hostvars[(groups['hana_primary'][-1])]['inventory_hostname_short'] }}"
+ loop_control:
+ label: "{{ inventory_hostname_short }}"
+ when:
+ - sap_hana_scaleout_standby is defined and sap_hana_scaleout_standby > 0
+ - not inventory_hostname_short == hostvars[(groups['hana_primary'] | last)]['inventory_hostname_short']
diff --git a/roles/sap_vm_provision/tasks/main.yml b/roles/sap_vm_provision/tasks/main.yml
new file mode 100644
index 0000000..0db4d41
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/main.yml
@@ -0,0 +1,25 @@
+---
+
+#### Provision host(s) for Deployment of SAP Software (as part of an SAP Software Solution Scenario e.g. SAP S/4HANA Distributed HA) ####
+
+- name: Begin execution
+ delegate_to: localhost
+ delegate_facts: false # keep facts with the original play hosts, not the delegated host
+ block:
+
+ - name: Execute to target {{ sap_vm_provision_iac_platform }} using {{ sap_vm_provision_iac_type }}
+ ansible.builtin.include_tasks: "{{ 'platform_' + sap_vm_provision_iac_type }}/{{ sap_vm_provision_iac_platform }}/execute_main.yml"
+ when: not sap_vm_provision_iac_post_deployment is defined or not sap_vm_provision_iac_post_deployment
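+ # e.g. the include path resolves to 'platform_ansible/aws_ec2_vs/execute_main.yml' when provisioning with Ansible to AWS EC2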
+
+
+#### Post Deployment of SAP - tasks for GCP, IBM Cloud, MS Azure ####
+
+- name: Begin execution of Post Deployment tasks
+ delegate_to: localhost
+ delegate_facts: false # keep facts with the original play hosts, not the delegated host
+ block:
+
+ - name: Execute Post Deployment tasks for SAP on target {{ sap_vm_provision_iac_platform }} using {{ sap_vm_provision_iac_type }}
+ ansible.builtin.include_tasks: "{{ 'platform_' + sap_vm_provision_iac_type }}/{{ sap_vm_provision_iac_platform }}/post_deployment_execute.yml"
+ when: sap_vm_provision_iac_post_deployment is defined and sap_vm_provision_iac_post_deployment
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/aws_ec2_vs/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible/aws_ec2_vs/execute_main.yml
new file mode 100644
index 0000000..cdebca1
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/aws_ec2_vs/execute_main.yml
@@ -0,0 +1,138 @@
+---
+
+- name: Ansible Task block for looped provisioning of AWS EC2 instances
+ environment:
+ AWS_ACCESS_KEY_ID: "{{ sap_vm_provision_aws_access_key }}"
+ AWS_SECRET_ACCESS_KEY: "{{ sap_vm_provision_aws_secret_access_key }}"
+ AWS_REGION: "{{ sap_vm_provision_aws_region }}"
+ block:
+
+ - name: Identify OS Image (AWS AMI)
+ register: register_aws_ami
+ amazon.aws.ec2_ami_info:
+ owners: ["aws-marketplace"]
+ filters:
+ name: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_os_image_dictionary')[sap_vm_provision_aws_ec2_vs_host_os_image] }}"
+
+ - name: Set fact to hold loop variables from include_tasks
+ ansible.builtin.set_fact:
+ register_provisioned_host_all: []
+
+ - name: Provision hosts to AWS
+ register: register_provisioned_hosts
+ ansible.builtin.include_tasks:
+ file: "{{ 'platform_' + sap_vm_provision_iac_type }}/{{ sap_vm_provision_iac_platform }}/execute_provision.yml"
+ apply:
+ environment:
+ AWS_ACCESS_KEY_ID: "{{ sap_vm_provision_aws_access_key }}"
+ AWS_SECRET_ACCESS_KEY: "{{ sap_vm_provision_aws_secret_access_key }}"
+ AWS_REGION: "{{ sap_vm_provision_aws_region }}"
+
+ - name: Add hosts provisioned to the Ansible Inventory
+ register: register_add_hosts
+ ansible.builtin.add_host:
+ name: "{{ add_item[0].host_node }}"
+ groups: "{{ add_item[0].sap_system_type + '_' if (add_item[0].sap_system_type != '') }}{{ add_item[0].sap_host_type }}"
+ ansible_host: "{{ add_item[0].instances[0].private_ip_address }}"
+ ansible_user: "root"
+ ansible_ssh_private_key_file: "{{ sap_vm_provision_ssh_host_private_key_file_path }}"
+ ansible_ssh_common_args: -o ConnectTimeout=180 -o ControlMaster=auto -o ControlPersist=3600s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no -o ProxyCommand='ssh -W %h:%p {{ sap_vm_provision_bastion_user }}@{{ sap_vm_provision_bastion_public_ip }} -p {{ sap_vm_provision_bastion_ssh_port }} -i {{ sap_vm_provision_ssh_bastion_private_key_file_path }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
+ loop: "{{ ansible_play_hosts | map('extract', hostvars, 'register_provisioned_host_all') }}"
+ loop_control:
+ label: "{{ add_item[0].host_node }}"
+ loop_var: add_item
+
+
+# Cannot override any variables from extravars input, see https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#understanding-variable-precedence
+# Ensure no default value exists for any prompted variable before execution of Ansible Playbook
+
+ - name: Gather information about AWS VPC Route Table for the VPC Subnet
+ amazon.aws.ec2_vpc_route_table_info:
+ filters:
+ association.subnet-id: "{{ sap_vm_provision_aws_vpc_subnet_id }}"
+ register: aws_vpc_subnet_rt_info
+
+ - name: Set fact to hold all inventory hosts in all groups
+ ansible.builtin.set_fact:
+ groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] ] | flatten | select() }}"
+
+ - name: Set Ansible Vars
+ register: register_set_ansible_vars
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars.yml
+
+ - name: Ansible AWS Route53 DNS Records for hosts
+ amazon.aws.route53:
+ state: present
+ private_zone: true
+ zone: "{{ hostvars[inventory_hostname].sap_vm_provision_dns_root_domain }}"
+ record: "{{ inventory_hostname }}.{{ hostvars[inventory_hostname].sap_vm_provision_dns_root_domain }}"
+ type: A
+ ttl: 7200
+ value: "{{ hostvars[inventory_hostname].ansible_host }}"
+ wait: true
+
+ # - ansible.builtin.debug:
+ # var: register_add_hosts.results
+
+- name: Ansible Task block to execute on target inventory hosts
+ delegate_to: "{{ inventory_hostname }}"
+ block:
+
+ # Required to collect the remote host's facts for further processing
+ # in the following steps
+ - name: Gather host facts
+ ansible.builtin.setup:
+
+ # Must be set to short hostname,
+ # so that command 'hostname' and 'hostname -s' return the short hostname only;
+ # otherwise may cause error with SAP SWPM using name.domain.com.domain.com
+ - name: Change system hostname (must be set to short name and not FQDN, as required by SAP)
+ ansible.builtin.hostname:
+ name: "{{ inventory_hostname_short }}"
+
+ - name: Set /etc/hosts
+ register: register_etc_hosts_file
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts.yml
+
+ - name: Set /etc/hosts for HA
+ register: register_etc_hosts_file_ha
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_ha.yml
+ when:
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+
+ - name: Set /etc/hosts for Scale-Out
+ register: register_etc_hosts_file_scaleout
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_scaleout.yml
+ when:
+ - (groups["hana_primary"] is defined and (groups["hana_primary"] | length>0)) and (sap_hana_scaleout_active_coordinator is defined or sap_hana_scaleout_active_worker is defined or sap_hana_scaleout_standby is defined)
+
+ - name: Set vars for sap_storage_setup Ansible Role
+ register: register_ansible_vars_storage
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars_storage.yml
+
+
+- name: Ansible Task block for provisioning of High Availability resources for AWS EC2 instances
+ delegate_to: localhost
+ run_once: true
+ environment:
+ AWS_ACCESS_KEY_ID: "{{ sap_vm_provision_aws_access_key }}"
+ AWS_SECRET_ACCESS_KEY: "{{ sap_vm_provision_aws_secret_access_key }}"
+ AWS_REGION: "{{ sap_vm_provision_aws_region }}"
+ when:
+ - sap_ha_pacemaker_cluster_aws_region is defined
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+ block:
+
+ - name: Provision High Availability resources for AWS EC2 hosts
+ ansible.builtin.include_tasks:
+ file: "{{ 'platform_' + sap_vm_provision_iac_type }}/{{ sap_vm_provision_iac_platform }}/execute_setup_ha.yml"
+ apply:
+ environment:
+ AWS_ACCESS_KEY_ID: "{{ sap_vm_provision_aws_access_key }}"
+ AWS_SECRET_ACCESS_KEY: "{{ sap_vm_provision_aws_secret_access_key }}"
+ AWS_REGION: "{{ sap_vm_provision_aws_region }}"
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/aws_ec2_vs/execute_provision.yml b/roles/sap_vm_provision/tasks/platform_ansible/aws_ec2_vs/execute_provision.yml
new file mode 100644
index 0000000..fb1f484
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/aws_ec2_vs/execute_provision.yml
@@ -0,0 +1,185 @@
+---
+# The tasks in this file are executed in a loop over the defined hosts
+
+# When SAP HANA Scale-Out is used, if host name is not in original specifications then strip suffix node number from host name
+- name: Set fact when performing SAP HANA Scale-Out
+ ansible.builtin.set_fact:
+ scaleout_origin_host_spec: "{{ inventory_hostname | regex_replace('^(.+?)\\d*$', '\\1') }}"
+ when:
+ - sap_hana_scaleout_active_coordinator is defined
+ - not inventory_hostname in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan].keys()
+
+- name: Provision AWS EC2 Virtual Server instance
+ register: register_provisioned_host_single
+ amazon.aws.ec2_instance:
+ state: started
+ name: "{{ inventory_hostname }}"
+ image_id: "{{ (register_aws_ami.images | sort(attribute='creation_date') | last).image_id }}"
+ instance_type: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].virtual_machine_profile }}"
+ key_name: "{{ sap_vm_provision_aws_key_pair_name_ssh_host_public_key }}"
+ security_groups: "{{ sap_vm_provision_aws_vpc_sg_names }}"
+ vpc_subnet_id: "{{ sap_vm_provision_aws_vpc_subnet_id }}"
+ tenancy: default
+ metadata_options:
+ http_endpoint: enabled
+# http_put_response_hop_limit: 8
+ http_tokens: optional # IMDSv1 = optional, IMDSv2 = required
+# instance_metadata_tags: disabled
+ network:
+ assign_public_ip: false
+ source_dest_check: "{{ not lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].disable_ip_anti_spoofing }}" # Disable the Anti IP Spoofing by setting Source/Destination Check to false
+
+- name: Set fact for storage volume letters calculations (max 25 volumes)
+ ansible.builtin.set_fact:
+ storage_vol_letters: "bcdefghijklmnopqrstuvwxyz"
+
+- name: Read AWS EC2 instance information
+ amazon.aws.ec2_instance_info:
+ filters:
+ "tag:Name": "{{ inventory_hostname }}"
+ "instance-state-name": ["running"]
+ register: instance_info
+
+- name: Set fact for available storage volume device names
+ ansible.builtin.set_fact:
+ available_volumes: |-
+ {% set letters = 'bcdefghijklmnopqrstuvwxyz' %}
+ {% set used_device_names = instance_info.instances[0].block_device_mappings | map(attribute='device_name') | list %}
+ {% set volumes = [] %}
+ {# a device name is available only when no existing block device mapping already uses it #}
+ {%- for letter in letters -%}
+ {% if '/dev/sd' + letter not in used_device_names -%}
+ {% set dev = volumes.append('/dev/sd' + letter) %}
+ {%- endif %}
+ {%- endfor %}
+ {{ volumes | list }}
+
+# Combine only the filesystem volume information from the lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')
+# for volume device assignment.
+# This task assigns device names for each volume to be created.
+- name: Set fact for target device map
+ ansible.builtin.set_fact:
+ filesystem_volume_map: |
+ {% set volume_map = [] -%}
+ {% set av_vol = available_volumes -%}
+ {% for storage_item in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].storage_definition -%}
+ {% for idx in range(0, storage_item.disk_count | default(1)) -%}
+ {% if (storage_item.filesystem_type is defined) -%}
+ {% if ('swap' in storage_item.filesystem_type and storage_item.swap_path is not defined)
+ or ('swap' not in storage_item.filesystem_type and storage_item.nfs_path is not defined) -%}
+ {% set vol = volume_map.extend([
+ {
+ 'definition_key': storage_item.name,
+ 'device': av_vol[0],
+ 'fstype': storage_item.filesystem_type | default('xfs'),
+ 'name': storage_item.name + idx|string,
+ 'size': storage_item.disk_size | default(0),
+ 'type': storage_item.disk_type | default('gp3')
+ }
+ ]) %}
+ {%- set _ = av_vol.pop(0) -%}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- endfor %}
+ {{ volume_map }}
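+# A minimal illustration with a hypothetical storage_definition entry
+# { name: 'hana_data', disk_count: 1, disk_size: 384, disk_type: 'gp3', filesystem_type: 'xfs' }:
+# the resulting map entry would be
+# { definition_key: 'hana_data', device: '/dev/sdb', fstype: 'xfs', name: 'hana_data0', size: 384, type: 'gp3' }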
+
+
+# The volume creation task requires the above task to define the parameter
+# which contains the calculated unique device names.
+- name: Provision AWS EBS volumes for AWS EC2 Virtual Server instance filesystems
+ amazon.aws.ec2_vol:
+ name: "{{ inventory_hostname }}-vol_{{ vol_item.name }}"
+ instance: "{{ register_provisioned_host_single.instance_ids[0] }}"
+ volume_type: "{{ vol_item.type }}"
+ volume_size: "{{ vol_item.size }}"
+ device_name: "{{ vol_item.device }}"
+ delete_on_termination: true
+ loop: "{{ filesystem_volume_map }}"
+ loop_control:
+ loop_var: vol_item
+ index_var: vol_item_index
+ label: "{{ vol_item.definition_key }}: {{ vol_item.name }} (size: {{ vol_item.size }})"
+ when:
+ - vol_item.fstype is defined
+ - vol_item.size > 0
+ register: volume_provisioning
+
+- name: Read AWS EC2 instance information
+ amazon.aws.ec2_instance_info:
+ filters:
+ "tag:Name": "{{ inventory_hostname }}"
+ register: instance_info
+
+- name: Add host facts
+ ansible.builtin.set_fact:
+ filesystem_volume_map: "{{ filesystem_volume_map }}"
+ volume_provisioning: "{{ volume_provisioning }}"
+ instance_info: "{{ instance_info }}"
+ delegate_to: "{{ inventory_hostname }}"
+ delegate_facts: true
+
+
+- name: Create fact for delegate host IP
+ ansible.builtin.set_fact:
+ provisioned_private_ip: "{{ register_provisioned_host_single.instances[0].private_ip_address }}"
+
+
+- name: Copy facts to delegate host
+ delegate_to: "{{ provisioned_private_ip }}"
+ delegate_facts: true
+ ansible.builtin.set_fact:
+ delegate_sap_vm_provision_bastion_user: "{{ sap_vm_provision_bastion_user }}"
+ delegate_sap_vm_provision_bastion_public_ip: "{{ sap_vm_provision_bastion_public_ip }}"
+ delegate_sap_vm_provision_bastion_ssh_port: "{{ sap_vm_provision_bastion_ssh_port }}"
+ delegate_sap_vm_provision_ssh_bastion_private_key_file_path: "{{ sap_vm_provision_ssh_bastion_private_key_file_path }}"
+ delegate_sap_vm_provision_ssh_host_private_key_file_path: "{{ sap_vm_provision_ssh_host_private_key_file_path }}"
+ delegate_private_ip: "{{ register_provisioned_host_single.instances[0].private_ip_address }}"
+ delegate_hostname: "{{ inventory_hostname }}"
+ delegate_sap_vm_provision_dns_root_domain_name: "{{ sap_vm_provision_dns_root_domain }}"
+
+### begin block, parameters will be applied to each task within the block
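+# For reference, the delegated connection vars below correspond roughly to this manual
+# SSH command (placeholders are illustrative, not variables from this role):
+# ssh -i <host_private_key> ec2-user@<provisioned_private_ip> \
+#   -o ProxyCommand='ssh -W %h:%p <bastion_user>@<bastion_public_ip> -p <bastion_ssh_port> -i <bastion_private_key>'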
+- name: Allow login as root OS user
+ remote_user: ec2-user
+ become: true
+ become_user: root
+ delegate_to: "{{ provisioned_private_ip }}"
+ delegate_facts: true
+ vars:
+ ansible_ssh_private_key_file: "{{ delegate_sap_vm_provision_ssh_host_private_key_file_path }}"
+ ansible_ssh_common_args: -o ConnectTimeout=180 -o ControlMaster=auto -o ControlPersist=3600s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no -o ProxyCommand='ssh -W %h:%p {{ delegate_sap_vm_provision_bastion_user }}@{{ delegate_sap_vm_provision_bastion_public_ip }} -p {{ delegate_sap_vm_provision_bastion_ssh_port }} -i {{ delegate_sap_vm_provision_ssh_bastion_private_key_file_path }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
+ block:
+
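+ # AWS images typically prefix root's authorized_keys entry with a forced command
+ # (e.g. no-port-forwarding,...,command="echo 'Please login as the user ec2-user...'" ssh-rsa AAAA...);
+ # the task below strips everything before 'ssh-rsa' so direct root key login works.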
+ - name: Fix root authorized_keys entries
+ ansible.builtin.replace:
+ path: /root/.ssh/authorized_keys
+ backup: true
+ regexp: '(^.*ssh-rsa)'
+ replace: 'ssh-rsa'
+
+ - name: Permit root login
+ ansible.builtin.replace:
+ path: /etc/ssh/sshd_config
+ regexp: '(^PermitRootLogin no)'
+ replace: 'PermitRootLogin yes'
+ register: sshd_config
+
+ - name: Reload sshd service
+ ansible.builtin.service:
+ name: sshd
+ state: reloaded
+ when:
+ - sshd_config.changed
+
+### end of block
+
+
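+# Merge identifying keys into the module result so downstream tasks (e.g. adding the
+# provisioned hosts to the Ansible inventory) can read host_node, sap_host_type and
+# sap_system_type (the latter two from the host specifications dictionary) per entry.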
+- name: Append loop value to register
+ ansible.builtin.set_fact:
+ register_provisioned_host_single: "{{ register_provisioned_host_single | combine( { 'host_node' : inventory_hostname } , { 'sap_host_type' : lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_host_type } , { 'sap_system_type' : (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_system_type | default('')) } ) }}"
+
+- name: Append output to merged register
+ ansible.builtin.set_fact:
+ register_provisioned_host_all: "{{ register_provisioned_host_all + [register_provisioned_host_single] }}"
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/aws_ec2_vs/execute_setup_ha.yml b/roles/sap_vm_provision/tasks/platform_ansible/aws_ec2_vs/execute_setup_ha.yml
new file mode 100644
index 0000000..ec281c6
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/aws_ec2_vs/execute_setup_ha.yml
@@ -0,0 +1,443 @@
+---
+
+- name: Gather information about AWS account
+ amazon.aws.aws_caller_info:
+ register: aws_account_info
+
+- name: Gather information about AWS VPC Route Table for the VPC Subnet
+ amazon.aws.ec2_vpc_route_table_info:
+ filters:
+ association.subnet-id: "{{ sap_vm_provision_aws_vpc_subnet_id }}"
+ register: aws_vpc_subnet_rt_info
+
+
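+# Note: on AWS EC2, the instance ID is exposed to the guest as the DMI board asset tag
+# and gathered by Ansible as ansible_board_asset_tag; the tasks below use it to
+# reference each host's EC2 instance ID.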
+- name: Ansible AWS VPC Route Table append route for SAP HANA HA
+ amazon.aws.ec2_vpc_route_table:
+ lookup: id
+ vpc_id: "{{ aws_vpc_subnet_rt_info.route_tables[0].vpc_id }}"
+ route_table_id: "{{ aws_vpc_subnet_rt_info.route_tables[0].route_table_id }}"
+ purge_subnets: false
+ purge_routes: false
+ state: present
+ routes:
+ - dest: "{{ sap_ha_pacemaker_cluster_vip_hana_primary_ip_address | default('192.168.1.90/32') }}"
+ instance_id: "{{ hostvars[host_node].ansible_board_asset_tag }}"
+ loop: "{{ (groups['hana_primary'] | default([])) }}"
+ loop_control:
+ loop_var: host_node
+ register: aws_vpc_subnet_rt_route_sap_hana
+ when:
+ - groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)
+
+- name: Ansible AWS Route53 DNS Records for SAP HANA HA Virtual Hostname
+ amazon.aws.route53:
+ state: present
+ private_zone: true
+ zone: "{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}"
+ record: "{{ sap_swpm_db_host }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}"
+ type: A
+ ttl: 7200
+ value: "{{ (sap_ha_pacemaker_cluster_vip_hana_primary_ip_address | default('192.168.1.90/32')) | regex_replace('/.*', '') }}"
+ wait: true
+ loop: "{{ (groups['hana_primary'] | default([])) }}"
+ loop_control:
+ loop_var: host_node
+ when:
+ - groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)
+
+
+- name: Ansible AWS VPC Route Table append route for SAP AnyDB HA
+ amazon.aws.ec2_vpc_route_table:
+ lookup: id
+ vpc_id: "{{ aws_vpc_subnet_rt_info.route_tables[0].vpc_id }}"
+ route_table_id: "{{ aws_vpc_subnet_rt_info.route_tables[0].route_table_id }}"
+ purge_subnets: false
+ purge_routes: false
+ state: present
+ routes:
+ - dest: "{{ sap_vm_temp_vip_anydb_primary | default('192.168.1.90/32') }}"
+ instance_id: "{{ hostvars[host_node].ansible_board_asset_tag }}"
+ loop: "{{ (groups['anydb_primary'] | default([])) }}"
+ loop_control:
+ loop_var: host_node
+ register: aws_vpc_subnet_rt_route_sap_anydb
+ when:
+ - groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0)
+
+- name: Ansible AWS Route53 DNS Records for SAP AnyDB HA Virtual Hostname
+ amazon.aws.route53:
+ state: present
+ private_zone: true
+ zone: "{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}"
+ record: "{{ sap_swpm_db_host }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}"
+ type: A
+ ttl: 7200
+ value: "{{ (sap_vm_temp_vip_anydb_primary | default('192.168.1.90/32')) | regex_replace('/.*', '') }}"
+ wait: true
+ loop: "{{ (groups['anydb_primary'] | default([])) }}"
+ loop_control:
+ loop_var: host_node
+ when:
+ - groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0)
+
+
+- name: Ansible AWS VPC Route Table append route for SAP NetWeaver ASCS HA
+ amazon.aws.ec2_vpc_route_table:
+ lookup: id
+ vpc_id: "{{ aws_vpc_subnet_rt_info.route_tables[0].vpc_id }}"
+ route_table_id: "{{ aws_vpc_subnet_rt_info.route_tables[0].route_table_id }}"
+ purge_subnets: false
+ purge_routes: false
+ state: present
+ routes:
+ - dest: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | default('192.168.2.10/32') }}"
+ instance_id: "{{ hostvars[host_node].ansible_board_asset_tag }}"
+ loop: "{{ (groups['nwas_ascs'] | default([])) }}"
+ loop_control:
+ loop_var: host_node
+ register: aws_vpc_subnet_rt_route_sap_netweaver_ascs
+ when:
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)
+
+- name: Ansible AWS Route53 DNS Records for SAP NetWeaver ASCS HA Virtual Hostname
+ amazon.aws.route53:
+ state: present
+ private_zone: true
+ zone: "{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}"
+ record: "{{ sap_swpm_ascs_instance_hostname }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}"
+ type: A
+ ttl: 7200
+ value: "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | default('192.168.2.10/32')) | regex_replace('/.*', '') }}"
+ wait: true
+ loop: "{{ (groups['nwas_ascs'] | default([])) }}"
+ loop_control:
+ loop_var: host_node
+ when:
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+
+
+- name: Ansible AWS VPC Route Table append route for SAP NetWeaver ERS HA
+ amazon.aws.ec2_vpc_route_table:
+ lookup: id
+ vpc_id: "{{ aws_vpc_subnet_rt_info.route_tables[0].vpc_id }}"
+ route_table_id: "{{ aws_vpc_subnet_rt_info.route_tables[0].route_table_id }}"
+ purge_subnets: false
+ purge_routes: false
+ state: present
+ routes:
+ - dest: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | default('192.168.2.11/32') }}"
+ instance_id: "{{ hostvars[host_node].ansible_board_asset_tag }}"
+ loop: "{{ (groups['nwas_ers'] | default([])) }}"
+ loop_control:
+ loop_var: host_node
+ register: aws_vpc_subnet_rt_route_sap_netweaver_ers
+ when:
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)
+
+- name: Ansible AWS Route53 DNS Records for SAP NetWeaver ERS HA Virtual Hostname
+ amazon.aws.route53:
+ state: present
+ private_zone: true
+ zone: "{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}"
+ record: "{{ sap_swpm_ers_instance_hostname }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}"
+ type: A
+ ttl: 7200
+ value: "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | default('192.168.2.11/32')) | regex_replace('/.*', '') }}"
+ wait: true
+ loop: "{{ (groups['nwas_ers'] | default([])) }}"
+ loop_control:
+ loop_var: host_node
+ when:
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+
+
+## For HA of PAS and AAS, if required
+
+# - name: Ansible AWS VPC Route Table append route for SAP NetWeaver PAS HA
+# amazon.aws.ec2_vpc_route_table:
+# lookup: id
+# vpc_id: "{{ aws_vpc_subnet_rt_info.route_tables[0].vpc_id }}"
+# route_table_id: "{{ aws_vpc_subnet_rt_info.route_tables[0].route_table_id }}"
+# purge_subnets: false
+# purge_routes: false
+# state: present
+# routes:
+# - dest: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_pas_ip_address | default('192.168.2.12/32') }}"
+# instance_id: "{{ hostvars[host_node].ansible_board_asset_tag }}"
+# loop: "{{ (groups['nwas_pas'] | default([])) }}"
+# loop_control:
+# loop_var: host_node
+# register: aws_vpc_subnet_rt_route_sap_netweaver_pas
+# when:
+# - groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)
+
+# - name: Ansible AWS Route53 DNS Records for SAP NetWeaver PAS HA Virtual Hostname
+# amazon.aws.route53:
+# state: present
+# private_zone: true
+# zone: "{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}"
+# record: "{{ sap_swpm_pas_instance_hostname }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}"
+# type: A
+# ttl: 7200
+# value: "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_pas_ip_address | default('192.168.2.12/32')) | regex_replace('/.*', '') }}"
+# wait: true
+# loop: "{{ (groups['nwas_pas'] | default([])) }}"
+# loop_control:
+# loop_var: host_node
+# when:
+# - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+
+
+# - name: Ansible AWS VPC Route Table append route for SAP NetWeaver AAS HA
+# amazon.aws.ec2_vpc_route_table:
+# lookup: id
+# vpc_id: "{{ aws_vpc_subnet_rt_info.route_tables[0].vpc_id }}"
+# route_table_id: "{{ aws_vpc_subnet_rt_info.route_tables[0].route_table_id }}"
+# purge_subnets: false
+# purge_routes: false
+# state: present
+# routes:
+# - dest: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_aas_ip_address | default('192.168.2.13/32') }}"
+# instance_id: "{{ hostvars[host_node].ansible_board_asset_tag }}"
+# loop: "{{ (groups['nwas_aas'] | default([])) }}"
+# loop_control:
+# loop_var: host_node
+# register: aws_vpc_subnet_rt_route_sap_netweaver_aas
+# when:
+# - groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)
+
+# - name: Ansible AWS Route53 DNS Records for SAP NetWeaver AAS HA Virtual Hostname
+# amazon.aws.route53:
+# state: present
+# private_zone: true
+# zone: "{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}"
+# record: "{{ sap_swpm_aas_instance_hostname }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}"
+# type: A
+# ttl: 7200
+# value: "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_aas_ip_address | default('192.168.2.13/32')) | regex_replace('/.*', '') }}"
+# wait: true
+# loop: "{{ (groups['nwas_aas'] | default([])) }}"
+# loop_control:
+# loop_var: host_node
+# when:
+# - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+
+
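+# Roughly equivalent to (with the trust policy JSON saved to a hypothetical local file):
+# aws iam create-role --role-name "HA-Role-Pacemaker" --assume-role-policy-document file://trust-policy.json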
+- name: AWS IAM Role - HA-Role-Pacemaker
+ amazon.aws.iam_role:
+ name: "HA-Role-Pacemaker"
+ assume_role_policy_document: |
+ {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": "sts:AssumeRole",
+ "Sid": "",
+ "Principal": {
+ "Service": "ec2.amazonaws.com"
+ }
+ }
+ ]
+ }
+
+# AWS HA for SAP - DataProvider
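+# Roughly equivalent to (with the policy JSON saved to a hypothetical local file):
+# aws iam put-role-policy --role-name "HA-Role-Pacemaker" --policy-name "HA-Policy-DataProvider" --policy-document file://dataprovider-policy.json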
+- name: AWS IAM Policy - HA-Policy-DataProvider
+ amazon.aws.iam_policy:
+ state: present
+ iam_type: role
+ iam_name: "HA-Role-Pacemaker"
+ policy_name: "HA-Policy-DataProvider"
+ policy_json: |
+ {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "EC2:DescribeInstances",
+ "EC2:DescribeVolumes"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Effect": "Allow",
+ "Action": "cloudwatch:GetMetricStatistics",
+ "Resource": "*"
+ },
+ {
+ "Effect": "Allow",
+ "Action": "s3:GetObject",
+ "Resource": "arn:aws:s3:::aws-sap-data-provider/config.properties"
+ }
+ ]
+ }
+
+# AWS HA for SAP - OverlayVirtualIPAgent
+- name: AWS IAM Policy - HA-Policy-OverlayVirtualIPAgent
+ amazon.aws.iam_policy:
+ state: present
+ iam_type: role
+ iam_name: "HA-Role-Pacemaker"
+ policy_name: "HA-Policy-OverlayVirtualIPAgent"
+ policy_json: |
+ {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "Stmt1424870324000",
+ "Effect": "Allow",
+ "Action": "ec2:DescribeRouteTables",
+ "Resource": "*"
+ },
+ {
+ "Sid": "Stmt1424860166260",
+ "Action": [
+ "ec2:CreateRoute",
+ "ec2:ReplaceRoute"
+ ],
+ "Effect": "Allow",
+ "Resource": "arn:aws:ec2:{{ sap_vm_provision_aws_region }}:{{ aws_account_info.account }}:route-table/{{ aws_vpc_subnet_rt_info.route_tables[0].route_table_id }}"
+ }
+ ]
+ }
+
+# AWS HA for SAP - STONITH of SAP HANA
+- name: AWS IAM Policy - HA-Policy-STONITH-SAPHANA
+ amazon.aws.iam_policy:
+ state: present
+ iam_type: role
+ iam_name: "HA-Role-Pacemaker"
+ policy_name: "HA-Policy-STONITH-SAPHANA"
+ policy_json: |
+ {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "Stmt1424870324000",
+ "Effect": "Allow",
+ "Action": [
+ "ec2:DescribeInstances",
+ "ec2:DescribeInstanceAttribute",
+ "ec2:DescribeTags"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Sid": "Stmt1424870324001",
+ "Effect": "Allow",
+ "Action": [
+ "ec2:ModifyInstanceAttribute",
+ "ec2:StartInstances",
+ "ec2:StopInstances"
+ ],
+ "Resource": [
+ "arn:aws:ec2:{{ sap_vm_provision_aws_region }}:{{ aws_account_info.account }}:instance/{{ hostvars[groups['hana_primary'][0]].ansible_host }}",
+ "arn:aws:ec2:{{ sap_vm_provision_aws_region }}:{{ aws_account_info.account }}:instance/{{ hostvars[groups['hana_secondary'][0]].ansible_host }}"
+ ]
+ }
+ ]
+ }
+ when: groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)
+
+# AWS HA for SAP - STONITH of SAP NWAS
+- name: AWS IAM Policy - HA-Policy-STONITH-SAPNWAS
+ amazon.aws.iam_policy:
+ state: present
+ iam_type: role
+ iam_name: "HA-Role-Pacemaker"
+ policy_name: "HA-Policy-STONITH-SAPHANA"
+ policy_json: |
+ {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "Stmt1424870324000",
+ "Effect": "Allow",
+ "Action": [
+ "ec2:DescribeInstances",
+ "ec2:DescribeInstanceAttribute",
+ "ec2:DescribeTags"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Sid": "Stmt1424870324001",
+ "Effect": "Allow",
+ "Action": [
+ "ec2:ModifyInstanceAttribute",
+ "ec2:StartInstances",
+ "ec2:StopInstances"
+ ],
+ "Resource": [
+ "arn:aws:ec2:{{ sap_vm_provision_aws_region }}:{{ aws_account_info.account }}:instance/{{ hostvars[groups['nwas_ascs'][0]].ansible_host }}",
+ "arn:aws:ec2:{{ sap_vm_provision_aws_region }}:{{ aws_account_info.account }}:instance/{{ hostvars[groups['nwas_ers'][0]].ansible_host }}"
+ ]
+ }
+ ]
+ }
+ when: groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+
+
+# Equivalent to
+# aws iam create-instance-profile --instance-profile-name "HA-Instance-Profile-Pacemaker-Cluster"
+# aws iam add-role-to-instance-profile --role-name "HA-Role-Pacemaker" --instance-profile-name "HA-Instance-Profile-Pacemaker-Cluster"
+- name: AWS IAM Instance Profile and attach AWS IAM Role - "HA-Instance-Profile-Pacemaker-Cluster"
+ amazon.aws.iam_instance_profile:
+ state: present
+ name: "HA-Instance-Profile-Pacemaker-Cluster"
+ role: "HA-Role-Pacemaker"
+ path: "/"
+
+# - name: AWS IAM Instance Profile - "HA-Instance-Profile-Pacemaker-Cluster"
+# ansible.builtin.command: aws iam create-instance-profile
+# --instance-profile-name "HA-Instance-Profile-Pacemaker-Cluster"
+# ignore_errors: true
+
+# - name: AWS IAM Instance Profile attach AWS IAM Role
+# ansible.builtin.command: aws iam add-role-to-instance-profile
+# --role-name "HA-Role-Pacemaker"
+# --instance-profile-name "HA-Instance-Profile-Pacemaker-Cluster"
+# ignore_errors: true
+
+# Equivalent to aws ec2 associate-iam-instance-profile --iam-instance-profile "Name=HA-Instance-Profile-Pacemaker-Cluster" --instance-id {{ hostvars[host_node].ansible_board_asset_tag }}
+- name: AWS EC2 Instances - attach AWS IAM Instance Profile for SAP HANA
+ amazon.aws.ec2_instance:
+ instance_ids: "{{ hostvars[host_node].ansible_board_asset_tag }}"
+ iam_instance_profile: "HA-Instance-Profile-Pacemaker-Cluster"
+ loop: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] ] | flatten | select() }}"
+ loop_control:
+ loop_var: host_node
+ when: groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)
+ ignore_errors: true
+
+# - name: AWS EC2 Instances - attach AWS IAM Instance Profile for SAP HANA
+# ansible.builtin.command: aws ec2 associate-iam-instance-profile
+# --iam-instance-profile "Name=HA-Instance-Profile-Pacemaker-Cluster"
+# --instance-id {{ hostvars[host_node].ansible_board_asset_tag }}
+# loop: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] ] | flatten | select() }}"
+# loop_control:
+# loop_var: host_node
+# when: groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)
+# ignore_errors: true
+
+# Equivalent to aws ec2 associate-iam-instance-profile --iam-instance-profile "Name=HA-Instance-Profile-Pacemaker-Cluster" --instance-id {{ hostvars[host_node].ansible_board_asset_tag }}
+- name: AWS EC2 Instances - attach AWS IAM Instance Profile for SAP NetWeaver
+ amazon.aws.ec2_instance:
+ instance_ids: "{{ hostvars[host_node].ansible_board_asset_tag }}"
+ iam_instance_profile: "HA-Instance-Profile-Pacemaker-Cluster"
+ loop: "{{ [ [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] ] | flatten | select() }}"
+ loop_control:
+ loop_var: host_node
+ when: groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+ ignore_errors: true
+
+# - name: AWS EC2 Instances - attach AWS IAM Instance Profile for SAP NetWeaver
+# ansible.builtin.command: aws ec2 associate-iam-instance-profile
+# --iam-instance-profile "Name=HA-Instance-Profile-Pacemaker-Cluster"
+# --instance-id {{ hostvars[host_node].ansible_board_asset_tag }}
+# loop: "{{ [ [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] ] | flatten | select() }}"
+# loop_control:
+# loop_var: host_node
+# when: groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+# ignore_errors: true
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/aws_ec2_vs/post_deployment_execute.yml b/roles/sap_vm_provision/tasks/platform_ansible/aws_ec2_vs/post_deployment_execute.yml
new file mode 100644
index 0000000..19c7341
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/aws_ec2_vs/post_deployment_execute.yml
@@ -0,0 +1,5 @@
+---
+
+- name: Post Deployment notification
+ ansible.builtin.debug:
+ msg: "There are no Post Deployment tasks for SAP on this Infrastructure Platform"
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/execute_main.yml
new file mode 100644
index 0000000..e08a478
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/execute_main.yml
@@ -0,0 +1,260 @@
+---
+
+- name: Ansible Task block for looped provisioning of Google Cloud CE VMs
+ environment:
+ GCP_AUTH_KIND: "serviceaccount"
+ GCP_SERVICE_ACCOUNT_FILE: "{{ sap_vm_provision_gcp_credentials_json }}"
+ block:
+
+ # # Must be GlobalOnly or ZonalPreferred. The default is ZonalOnly
+ # - name: GCP Project metadata - check VmDnsSetting variable
+
+ - name: Identify GCP OS Image
+ register: register_gcp_os_image
+ google.cloud.gcp_compute_image_info:
+ project: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_os_image_dictionary')[sap_vm_provision_gcp_ce_vm_host_os_image].project }}"
+ filters:
+ - family = "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_os_image_dictionary')[sap_vm_provision_gcp_ce_vm_host_os_image].family }}"
+ - -deprecated.state = DEPRECATED
+
+ - name: Identify GCP Network (VPC)
+ google.cloud.gcp_compute_network_info:
+ project: "{{ sap_vm_provision_gcp_project }}"
+ filters:
+ - name = "{{ sap_vm_provision_gcp_vpc_name }}"
+ register: gcp_vpc_info
+
+ - name: Identify GCP Subnetwork (VPC Subnet)
+ google.cloud.gcp_compute_subnetwork_info:
+ project: "{{ sap_vm_provision_gcp_project }}"
+ region: "{{ sap_vm_provision_gcp_region }}"
+ filters:
+ - name = "{{ sap_vm_provision_gcp_vpc_subnet_name }}"
+ register: gcp_vpc_subnet_info
+
+ - name: Set fact to hold loop variables from include_tasks
+ ansible.builtin.set_fact:
+ register_provisioned_host_all: []
+
+ - name: Provision hosts to Google Cloud
+ register: register_provisioned_hosts
+ ansible.builtin.include_tasks:
+ file: "{{ 'platform_' + sap_vm_provision_iac_type }}/{{ sap_vm_provision_iac_platform }}/execute_provision.yml"
+ apply:
+ environment:
+ GCP_AUTH_KIND: "serviceaccount"
+ GCP_SERVICE_ACCOUNT_FILE: "{{ sap_vm_provision_gcp_credentials_json }}"
+
+ - name: Add hosts provisioned to the Ansible Inventory
+ register: register_add_hosts
+ ansible.builtin.add_host:
+ name: "{{ add_item[0].host_node }}"
+ groups: "{{ add_item[0].sap_system_type + '_' if (add_item[0].sap_system_type != '') }}{{ add_item[0].sap_host_type }}"
+ ansible_host: "{{ add_item[0].networkInterfaces[0].networkIP }}"
+ ansible_user: "root"
+ ansible_ssh_private_key_file: "{{ sap_vm_provision_ssh_host_private_key_file_path }}"
+ ansible_ssh_common_args: -o ConnectTimeout=180 -o ControlMaster=auto -o ControlPersist=3600s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no -o ProxyCommand='ssh -W %h:%p {{ sap_vm_provision_bastion_user }}@{{ sap_vm_provision_bastion_public_ip }} -p {{ sap_vm_provision_bastion_ssh_port }} -i {{ sap_vm_provision_ssh_bastion_private_key_file_path }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
+ loop: "{{ ansible_play_hosts | map('extract', hostvars, 'register_provisioned_host_all') }}"
+ loop_control:
+ label: "{{ add_item[0].host_node }}"
+ loop_var: add_item
+
+
+# Cannot override any variables from extravars input, see https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#understanding-variable-precedence
+# Ensure no default value exists for any prompted variable before execution of Ansible Playbook
+
+ - name: Set fact to hold all inventory hosts in all groups
+ ansible.builtin.set_fact:
+ groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] ] | flatten | select() }}"
+
+ - name: Set Ansible Vars
+ register: register_set_ansible_vars
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars.yml
+
+ - name: Gather GCP VM information
+ google.cloud.gcp_compute_instance_info:
+ project: "{{ sap_vm_provision_gcp_project }}"
+ zone: "{{ sap_vm_provision_gcp_region_zone }}"
+ filters:
+ - name = {{ inventory_hostname }}
+ register: gcp_vm_info
+
+ - name: Gather GCP VPC Subnet information
+ google.cloud.gcp_compute_subnetwork_info:
+ project: "{{ sap_vm_provision_gcp_project }}"
+ region: "{{ sap_vm_provision_gcp_region }}"
+ filters:
+ - name = {{ sap_vm_provision_gcp_vpc_subnet_name }}
+ register: gcp_vpc_subnet_info
+
+ - name: Gather GCP Private DNS information
+ google.cloud.gcp_dns_managed_zone_info:
+ project: "{{ sap_vm_provision_gcp_project }}"
+ dns_name: "{{ sap_vm_provision_dns_root_domain }}."
+ register: gcp_pdns_info
+
+ # - name: Gather information about GCP Router and table for the VPC Subnet
+ # google.cloud.gcp_compute_router_info:
+ # project: "{{ sap_vm_provision_gcp_project }}"
+ # region: "{{ sap_vm_provision_gcp_region }}"
+ # filters:
+ # - network = "{{ gcp_vpc_info.resources[0].selfLink }}"
+ # # - name = sap-vpc-router
+ # register: gcp_router_info
+
+ # - name: Verify IP Forwarding for GCP VMs
+ # ansible.builtin.fail:
+ # msg: GCP CE VM does not have IP Forwarding enabled
+ # when: not gcp_vm_info.resources[0].canIpForward
+
+ - name: GCP Private DNS Records for hosts
+ google.cloud.gcp_dns_resource_record_set:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ managed_zone:
+ name: "{{ gcp_pdns_info.resources[0].name }}"
+ dnsName: "{{ hostvars[inventory_hostname].sap_vm_provision_dns_root_domain }}."
+ name: "{{ inventory_hostname }}.{{ hostvars[inventory_hostname].sap_vm_provision_dns_root_domain }}."
+ target:
+ - "{{ hostvars[inventory_hostname].ansible_host }}"
+ type: A
+ ttl: 7200
+ register: gcp_pdns_records
+ until: not gcp_pdns_records.failed
+ retries: 5
+ delay: 5
+
+# - ansible.builtin.debug:
+# var: register_add_hosts.results
+
+- name: Ansible Task block to execute on target inventory hosts
+ delegate_to: "{{ inventory_hostname }}"
+ block:
+
+ # Required to collect the remote host's facts for further processing
+ # in the following steps
+ - name: Gather host facts
+ ansible.builtin.setup:
+
+ # Must be set to short hostname,
+ # so that command 'hostname' and 'hostname -s' return the short hostname only;
+ # otherwise may cause error with SAP SWPM using name.domain.com.domain.com
+ - name: Change system hostname (must be set to short name and not FQDN, as required by SAP)
+ ansible.builtin.hostname:
+ name: "{{ inventory_hostname_short }}"
+
+ # GCP OS Images are missing the NetworkManager-config-server package; reconfigure
+ # NetworkManager via nmcli so that DHCP remains in use for the GCP VM while the
+ # DNS, search domain and FQDN settings below are enforced
+ # Primary IP Address by default uses subnet netmask /32 CIDR
+ - name: Ensure network configuration is persistent
+ ansible.builtin.shell: |
+ if grep -q rhel /etc/os-release
+ then
+ #### Override DNS auto configured based on DHCP response
+ #### Re-generate resolv.conf (/run/NetworkManager/resolv.conf and /etc/resolv.conf)
+ # Ignore Auto DNS
+ nmcli device modify eth0 ipv4.ignore-auto-dns yes
+ nmcli connection modify Wired\ connection\ 1 ipv4.ignore-auto-dns yes
+ # Ensure set to Google Cloud Private DNS (169.254.169.254 i.e. ns-gcp-private.googledomains.com)
+ nmcli device modify eth0 ipv4.dns 169.254.169.254
+ nmcli connection modify Wired\ connection\ 1 ipv4.dns 169.254.169.254
+ echo "supersede domain-name-servers 169.254.169.254;" >> /etc/dhcp/dhclient.conf
+ # Set DNS Search domains
+ nmcli device modify eth0 ipv4.dns-search {{ sap_vm_provision_dns_root_domain }},google.internal
+ nmcli connection modify Wired\ connection\ 1 ipv4.dns-search {{ sap_vm_provision_dns_root_domain }},google.internal
+ # Set Hostname and FQDN
+ nmcli device modify eth0 ipv4.dhcp-hostname ""
+ nmcli device modify eth0 ipv4.dhcp-fqdn {{ inventory_hostname }}.{{ sap_vm_provision_dns_root_domain }}
+ nmcli connection modify Wired\ connection\ 1 ipv4.dhcp-hostname ""
+ nmcli connection modify Wired\ connection\ 1 ipv4.dhcp-fqdn {{ inventory_hostname }}.{{ sap_vm_provision_dns_root_domain }}
+ #### Reset network interface for hostname and domain to set
+ # Reload RHEL Network Manager
+ systemctl reload NetworkManager
+ # Restart the connection to enact changes
+ # This will also re-populate /etc/hosts with records for the VM Primary IP and the Google Cloud Instance Metadata Service
+ nmcli connection reload && nmcli con down Wired\ connection\ 1 && nmcli con up Wired\ connection\ 1
+ fi
+ # when: ansible_os_family == 'RedHat' # when is evaluated on the localhost, not on the delegated host
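+ # If needed, verify the applied DNS settings afterwards with, for example:
+ # nmcli -g ipv4.dns,ipv4.dns-search connection show 'Wired connection 1'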
+
+
+ # # GCP OS Images are missing NetworkManager-config-server package, append NetworkManager config file to ensure DHCP is still used for GCP VM
+ # - name: Ensure network configuration is persistent
+ # ansible.builtin.include_role:
+ # name: fedora.rhel-system-roles.network
+ # vars:
+ # network_provider: nm
+ # network_connections:
+ # - name: "{{ ansible_default_ipv4.alias }}"
+ # mac: "{{ ansible_default_ipv4.macaddress }}"
+ # interface_name: "{{ ansible_default_ipv4.interface }}"
+ # type: ethernet
+ # ip:
+ # dhcp4: true
+ # dhcp4_send_hostname: true
+ # when: ansible_os_family == 'RedHat' # when is evaluated on the localhost, not on the delegated host
+
+ # - name: Workaround - refresh OS Package Repo cache to avoid GCP custom package repo timeouts causing errors in RHEL package updates
+ # throttle: 1 # Spawn 1 worker only, forcing execute of shell commands to one host at a time and avoiding GCP package repo bandwidth restrictions
+ # ansible.builtin.shell: |
+ # yum clean all
+ # yum makecache
+ # #when: ansible_os_family == 'RedHat' # when is evaluated on the localhost, not on the delegated host
+
+
+ - name: Set /etc/hosts
+ register: register_etc_hosts_file
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts.yml
+
+ - name: Set /etc/hosts for HA
+ register: register_etc_hosts_file_ha
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_ha.yml
+ when:
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+
+ - name: Set /etc/hosts for Scale-Out
+ register: register_etc_hosts_file_scaleout
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_scaleout.yml
+ when:
+ - (groups["hana_primary"] is defined and (groups["hana_primary"] | length>0)) and (sap_hana_scaleout_active_coordinator is defined or sap_hana_scaleout_active_worker is defined or sap_hana_scaleout_standby is defined)
+
+ - name: Set vars for sap_storage_setup Ansible Role
+ register: register_ansible_vars_storage
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars_storage.yml
+
+
+- name: Ansible Task block to execute on target inventory hosts - High Availability
+ delegate_to: "{{ inventory_hostname }}"
+ when:
+ - sap_ha_pacemaker_cluster_gcp_region_zone is defined
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+ block:
+
+ - name: Stop firewalld on all hosts before setup of Google Cloud Load Balancer
+ ansible.builtin.systemd:
+ name: firewalld
+ state: stopped
+ enabled: false
+
+
+- name: Ansible Task block for looped provisioning of High Availability resources for Google Cloud CE VMs
+ delegate_to: localhost
+ run_once: true
+ environment:
+ GCP_AUTH_KIND: "serviceaccount"
+ GCP_SERVICE_ACCOUNT_FILE: "{{ sap_vm_provision_gcp_credentials_json }}"
+ when:
+ - sap_ha_pacemaker_cluster_gcp_region_zone is defined
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+ block:
+
+ - name: Provision High Availability resources for GCP CE hosts
+ ansible.builtin.include_tasks:
+ file: "{{ 'platform_' + sap_vm_provision_iac_type }}/{{ sap_vm_provision_iac_platform }}/execute_setup_ha.yml"
+ apply:
+ environment:
+ GCP_AUTH_KIND: "serviceaccount"
+ GCP_SERVICE_ACCOUNT_FILE: "{{ sap_vm_provision_gcp_credentials_json }}"
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/execute_provision.yml b/roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/execute_provision.yml
new file mode 100644
index 0000000..bb4536c
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/execute_provision.yml
@@ -0,0 +1,197 @@
+---
+# The tasks in this file are executed in a loop over the defined hosts
+
+# When SAP HANA Scale-Out is used and the host name is not in the original specifications, strip the trailing node number from the host name
+- name: Set fact when performing SAP HANA Scale-Out
+ ansible.builtin.set_fact:
+ scaleout_origin_host_spec: "{{ inventory_hostname | regex_replace('^(.+?)\\d*$', '\\1') }}"
+ when:
+ - sap_hana_scaleout_active_coordinator is defined
+ - not inventory_hostname in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan].keys()
+
+# Create a flat list with names for each volume to be created.
+- name: Set fact for target device map
+ ansible.builtin.set_fact:
+ storage_disks_map: |
+ {% set disks_map = [] -%}
+ {% for storage_item in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].storage_definition -%}
+ {% for idx in range(0, storage_item.disk_count | default(1)) -%}
+ {% if (storage_item.filesystem_type is defined) -%}
+ {% if ('swap' in storage_item.filesystem_type and storage_item.swap_path is not defined)
+ or ('swap' not in storage_item.filesystem_type and storage_item.nfs_path is not defined) -%}
+ {% set vol = disks_map.extend([
+ {
+ 'definition_key': storage_item.name,
+ 'name': storage_item.name + idx|string,
+ 'size': storage_item.disk_size | default(0),
+ 'type': storage_item.disk_type | default('')
+ }
+ ]) %}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- endfor %}
+ {{ disks_map }}
+
+
+### LIMITATION - Disks must be provisioned first and attached when the VM is created; the google.cloud collection cannot attach newly provisioned disks to an existing VM, see https://github.com/ansible-collections/google.cloud/issues/193
+# The volume creation task requires the above task to define the parameter
+# which contains the calculated unique device names.
+- name: Provision Google Cloud Persistent Disk volumes for Google Cloud VM filesystems
+ google.cloud.gcp_compute_disk:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ zone: "{{ sap_vm_provision_gcp_region_zone }}"
+ name: "{{ inventory_hostname + '-vol-' + vol_item.name | replace('_', '-')}}"
+ size_gb: "{{ vol_item.size }}"
+ loop: "{{ storage_disks_map }}"
+ loop_control:
+ loop_var: vol_item
+ index_var: vol_item_index
+ label: "{{ vol_item.definition_key }}: {{ vol_item.name }} (size: {{ vol_item.size }})"
+ when:
+ - vol_item.size > 0
+ register: volume_provisioning
+# failed_when: "(volume_provisioning.msg is defined) and ('already exists' not in volume_provisioning.msg)"
+
+
+# Create list of disks to attach to GCP VM
+- name: Set fact for target device map
+ ansible.builtin.set_fact:
+ provisioned_disks_map: |
+ {% set disks_map = [
+ {
+ 'auto_delete': 'true',
+ 'boot': 'true',
+ 'interface': 'SCSI',
+ 'initialize_params': {
+ 'disk_type': 'pd-standard',
+ 'source_image': register_gcp_os_image.resources[0].selfLink
+ }
+ }
+ ] -%}
+ {% for storage_item in volume_provisioning.results -%}
+ {% set vol = disks_map.extend([
+ {
+ 'auto_delete': 'true',
+ 'boot': 'false',
+ 'interface': 'SCSI',
+ 'source': {
+ 'selfLink': storage_item.selfLink
+ }
+ }
+ ]) %}
+ {%- endfor %}
+ {{ disks_map }}
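+# The resulting list holds the boot disk definition first (built from the OS image
+# selfLink), followed by one entry per persistent disk provisioned above, referenced
+# by 'source.selfLink' and all attached over the SCSI interface.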
+
+
+- name: Provision Google Cloud VM
+ register: register_provisioned_host_single
+ google.cloud.gcp_compute_instance:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ zone: "{{ sap_vm_provision_gcp_region_zone }}"
+ name: "{{ inventory_hostname }}"
+ machine_type: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].virtual_machine_profile }}"
+ can_ip_forward: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].disable_ip_anti_spoofing }}" # When disable_ip_anti_spoofing is true, can_ip_forward must also be true
+ network_interfaces:
+ - network:
+ selfLink: "{{ gcp_vpc_info.resources[0].selfLink }}"
+ subnetwork:
+ selfLink: "{{ gcp_vpc_subnet_info.resources[0].selfLink }}"
+ # 'NVME interface is only supported for confidential VMs or the following VM families: [a3-vm, c1-metal, c2-metal, c3-metal, c3-vm, c3d-vm, ct5p-vm, g2-vm, h3-vm, m3-vm, t2a-vm]
+ disks: "{{ provisioned_disks_map }}"
+ metadata:
+ enable-oslogin: false # Do not use GCP Project OS Login approach for SSH Keys
+ block-project-ssh-keys: true # Do not use GCP Project Metadata approach for SSH Keys
+ ssh-keys: "admin:{{ lookup('ansible.builtin.file', sap_vm_provision_ssh_host_public_key_file_path ) }}" # Uses the GCP VM Instance Metadata approach for SSH Keys. Shows in GCP Console GUI under 'SSH Keys' for the VM Instance. Can not use 'root' because SSH 'PermitRootLogin' by default is 'no'.
+ service_accounts: # List of service accounts authorized for Google Cloud VM (allow access via Instance Metadata service to computeMetadata/v1/instance/service-accounts etc for fence_gce Fencing Agent)
+ - email: "" # Empty string for service account name, will default to the "Compute Engine Default Service Account" for the GCP Project (e.g. xx-compute@developer.gserviceaccount.com)
+ scopes:
+ - "https://www.googleapis.com/auth/cloud-platform" # Allow full access to all Cloud APIs
+ # ["compute-rw", "storage-rw", "logging-write", "monitoring-write", "service-control", "service-management"]
+
+# Required because state: present on the Ansible module gcp_compute_instance does not wait for the VM to boot
+- name: Wait 90 seconds for Google Cloud VM to boot
+ ansible.builtin.pause:
+ seconds: 90
+ prompt: ""
+ when: register_provisioned_host_single.changed
+
+- name: Read Google Cloud VM information
+ google.cloud.gcp_compute_instance_info:
+ project: "{{ sap_vm_provision_gcp_project }}"
+ zone: "{{ sap_vm_provision_gcp_region_zone }}"
+ filters:
+ - name = {{ inventory_hostname }}
+ register: instance_info
+
+
+- name: Create fact for delegate host IP
+ ansible.builtin.set_fact:
+ provisioned_private_ip: "{{ register_provisioned_host_single.networkInterfaces[0].networkIP }}"
+
+
+- name: Copy facts to delegate host
+ delegate_to: "{{ provisioned_private_ip }}"
+ delegate_facts: true
+ ansible.builtin.set_fact:
+ delegate_sap_vm_provision_bastion_user: "{{ sap_vm_provision_bastion_user }}"
+ delegate_sap_vm_provision_bastion_public_ip: "{{ sap_vm_provision_bastion_public_ip }}"
+ delegate_sap_vm_provision_bastion_ssh_port: "{{ sap_vm_provision_bastion_ssh_port }}"
+ delegate_sap_vm_provision_ssh_bastion_private_key_file_path: "{{ sap_vm_provision_ssh_bastion_private_key_file_path }}"
+ delegate_sap_vm_provision_ssh_host_private_key_file_path: "{{ sap_vm_provision_ssh_host_private_key_file_path }}"
+ delegate_private_ip: "{{ register_provisioned_host_single.networkInterfaces[0].networkIP }}"
+ delegate_hostname: "{{ inventory_hostname }}"
+ delegate_sap_vm_provision_dns_root_domain_name: "{{ sap_vm_provision_dns_root_domain }}"
+
+
+### begin block, parameters will be applied to each task within the block
+- name: Allow login as root OS user
+ remote_user: admin
+ become: true
+ become_user: root
+ delegate_to: "{{ provisioned_private_ip }}"
+ delegate_facts: true
+ vars:
+ ansible_ssh_private_key_file: "{{ delegate_sap_vm_provision_ssh_host_private_key_file_path }}"
+ ansible_ssh_common_args: -o ConnectTimeout=180 -o ControlMaster=auto -o ControlPersist=3600s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no -o ProxyCommand='ssh -W %h:%p {{ delegate_sap_vm_provision_bastion_user }}@{{ delegate_sap_vm_provision_bastion_public_ip }} -p {{ delegate_sap_vm_provision_bastion_ssh_port }} -i {{ delegate_sap_vm_provision_ssh_bastion_private_key_file_path }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
+ block:
+
+ - name: Create .ssh directory for root user
+ ansible.builtin.file:
+ path: /root/.ssh
+ state: directory
+ mode: '0744'
+
+ - name: Create root authorized_keys file and entries
+ ansible.builtin.copy:
+ dest: /root/.ssh/authorized_keys
+ mode: '0600'
+ content: |
+ {{ lookup('ansible.builtin.file', sap_vm_provision_ssh_host_public_key_file_path ) }}
+
+ - name: Permit root login
+ ansible.builtin.replace:
+ path: /etc/ssh/sshd_config
+ regexp: '(^PermitRootLogin no)'
+ replace: 'PermitRootLogin yes'
+ register: sshd_config
+
+ - name: Reload sshd service
+ ansible.builtin.service:
+ name: sshd
+ state: reloaded
+ when:
+ - sshd_config.changed
+
+### end of block
+
+
+- name: Append loop value to register
+ ansible.builtin.set_fact:
+ register_provisioned_host_single: "{{ register_provisioned_host_single | combine( { 'host_node' : inventory_hostname } , { 'sap_host_type' : lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_host_type } , { 'sap_system_type' : (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_system_type | default('')) } ) }}"
+
+- name: Append output to merged register
+ ansible.builtin.set_fact:
+ register_provisioned_host_all: "{{ register_provisioned_host_all + [register_provisioned_host_single] }}"
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/execute_setup_ha.yml b/roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/execute_setup_ha.yml
new file mode 100644
index 0000000..5d5e520
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/execute_setup_ha.yml
@@ -0,0 +1,767 @@
+---
+
+# Primary IP Address by default uses subnet netmask /32 CIDR
+# Virtual IP Address also uses subnet netmask /32 CIDR, otherwise subnet traffic attempts to route through the Load Balancer Backend Service
+
+# - name: GCP append route for SAP HANA HA (route must be outside of existing VPC Subnet Range/s)
+# google.cloud.gcp_compute_route:
+# state: present
+# project: "{{ sap_vm_provision_gcp_project }}"
+# name: "{{ sap_swpm_db_host }}-vip"
+# dest_range: "{{ (sap_ha_pacemaker_cluster_vip_hana_primary_ip_address | default('192.168.1.90/32')) | regex_replace('/.*', '') }}"
+# next_hop_instance:
+# selfLink: "{{ gcp_vm_info | json_query('results[*].resources[?name==`' + host_node + '`].selfLink') | flatten | join(' ') }}"
+# network:
+# selfLink: "{{ gcp_vpc_info.resources[0].selfLink }}"
+# loop: "{{ (groups['hana_primary'] | default([])) }}"
+# loop_control:
+# loop_var: host_node
+# register: gcp_vpc_subnet_rt_route_sap_hana
+# when:
+# - groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)
+
+- name: GCP Private DNS Record for SAP HANA HA Virtual Hostname
+ google.cloud.gcp_dns_resource_record_set:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ managed_zone:
+ name: "{{ gcp_pdns_info.resources[0].name }}"
+ dnsName: "{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}."
+ name: "{{ sap_swpm_db_host }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}."
+ target:
+ - "{{ (sap_ha_pacemaker_cluster_vip_hana_primary_ip_address | default('192.168.1.90/32')) | regex_replace('/.*', '') }}"
+ type: A
+ ttl: 7200
+ loop: "{{ (groups['hana_primary'] | default([])) }}"
+ loop_control:
+ loop_var: host_node
+ when:
+ - groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)
+
+
+# - name: GCP append route for SAP AnyDB HA (route must be outside of existing VPC Subnet Range/s)
+# google.cloud.gcp_compute_route:
+# state: present
+# project: "{{ sap_vm_provision_gcp_project }}"
+# name: "{{ sap_swpm_db_host }}-vip"
+# dest_range: "{{ (sap_vm_temp_vip_anydb_primary | default('192.168.1.90/32')) | regex_replace('/.*', '') }}"
+# next_hop_instance:
+# selfLink: "{{ gcp_vm_info | json_query('results[*].resources[?name==`' + host_node + '`].selfLink') | flatten | join(' ') }}"
+# network:
+# selfLink: "{{ gcp_vpc_info.resources[0].selfLink }}"
+# loop: "{{ (groups['anydb_primary'] | default([])) }}"
+# loop_control:
+# loop_var: host_node
+# register: gcp_vpc_subnet_rt_route_sap_anydb
+# when:
+# - groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0)
+
+- name: GCP Private DNS Record for SAP AnyDB HA Virtual Hostname
+ google.cloud.gcp_dns_resource_record_set:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ managed_zone:
+ name: "{{ gcp_pdns_info.resources[0].name }}"
+ dnsName: "{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}."
+ name: "{{ sap_swpm_db_host }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}."
+ target:
+ - "{{ (sap_vm_temp_vip_anydb_primary | default('192.168.1.90/32')) | regex_replace('/.*', '') }}"
+ type: A
+ ttl: 7200
+ loop: "{{ (groups['anydb_primary'] | default([])) }}"
+ loop_control:
+ loop_var: host_node
+ when:
+ - groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)
+
+
+# - name: GCP append route for SAP NetWeaver ASCS HA (route must be outside of existing VPC Subnet Range/s)
+# google.cloud.gcp_compute_route:
+# state: present
+# project: "{{ sap_vm_provision_gcp_project }}"
+# name: "{{ sap_swpm_ascs_instance_hostname }}-vip"
+# dest_range: "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | default('192.168.2.10/32')) | regex_replace('/.*', '') }}"
+# next_hop_instance:
+# selfLink: "{{ gcp_vm_info | json_query('results[*].resources[?name==`' + host_node + '`].selfLink') | flatten | join(' ') }}"
+# network:
+# selfLink: "{{ gcp_vpc_info.resources[0].selfLink }}"
+# loop: "{{ (groups['nwas_ascs'] | default([])) }}"
+# loop_control:
+# loop_var: host_node
+# register: gcp_vpc_subnet_rt_route_sap_netweaver_ascs
+# when:
+# - groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)
+
+- name: GCP Private DNS Record for SAP NetWeaver ASCS HA Virtual Hostname
+ google.cloud.gcp_dns_resource_record_set:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ managed_zone:
+ name: "{{ gcp_pdns_info.resources[0].name }}"
+ dnsName: "{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}."
+ name: "{{ sap_swpm_ascs_instance_hostname }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}."
+ target:
+ - "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | default('192.168.2.10/32')) | regex_replace('/.*', '') }}"
+ type: A
+ ttl: 7200
+ loop: "{{ (groups['nwas_ascs'] | default([])) }}"
+ loop_control:
+ loop_var: host_node
+ when:
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+
+
+# - name: GCP append route for SAP NetWeaver ERS HA
+# google.cloud.gcp_compute_route:
+# state: present
+# project: "{{ sap_vm_provision_gcp_project }}"
+# name: "{{ sap_swpm_ers_instance_hostname }}-vip"
+# dest_range: "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | default('192.168.2.11/32')) | regex_replace('/.*', '') }}"
+# next_hop_instance:
+# selfLink: "{{ gcp_vm_info | json_query('results[*].resources[?name==`' + host_node + '`].selfLink') | flatten | join(' ') }}"
+# network:
+# selfLink: "{{ gcp_vpc_info.resources[0].selfLink }}"
+# loop: "{{ (groups['nwas_ers'] | default([])) }}"
+# loop_control:
+# loop_var: host_node
+# register: gcp_vpc_subnet_rt_route_sap_netweaver_ers
+# when:
+# - groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)
+
+- name: GCP Private DNS Record for SAP NetWeaver ERS HA Virtual Hostname
+ google.cloud.gcp_dns_resource_record_set:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ managed_zone:
+ name: "{{ gcp_pdns_info.resources[0].name }}"
+ dnsName: "{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}."
+ name: "{{ sap_swpm_ers_instance_hostname }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}."
+ target:
+ - "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | default('192.168.2.11/32')) | regex_replace('/.*', '') }}"
+ type: A
+ ttl: 7200
+ loop: "{{ (groups['nwas_ers'] | default([])) }}"
+ loop_control:
+ loop_var: host_node
+ when:
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+
+
+## For HA of PAS and AAS, if required
+
+# - name: GCP append route for SAP NetWeaver PAS HA
+# google.cloud.gcp_compute_route:
+# state: present
+# project: "{{ sap_vm_provision_gcp_project }}"
+# name: "{{ sap_swpm_pas_instance_hostname }}-vip"
+# dest_range: "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_pas_ip_address | default('192.168.2.12/32')) | regex_replace('/.*', '') }}"
+# next_hop_instance:
+# selfLink: "{{ gcp_vm_info | json_query('results[*].resources[?name==`' + host_node + '`].selfLink') | flatten | join(' ') }}"
+# network:
+# selfLink: "{{ gcp_vpc_info.resources[0].selfLink }}"
+# loop: "{{ (groups['nwas_pas'] | default([])) }}"
+# loop_control:
+# loop_var: host_node
+# register: gcp_vpc_subnet_rt_route_sap_netweaver_pas
+# when:
+# - groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)
+
+# - name: GCP Private DNS Record for SAP NetWeaver PAS HA Virtual Hostname
+# google.cloud.gcp_dns_resource_record_set:
+# state: present
+# project: "{{ sap_vm_provision_gcp_project }}"
+# managed_zone:
+# name: "{{ gcp_pdns_info.resources[0].name }}"
+# dnsName: "{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}."
+# name: "{{ sap_swpm_pas_instance_hostname }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}."
+# target:
+# - "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_pas_ip_address | default('192.168.2.12/32')) | regex_replace('/.*', '') }}"
+# type: A
+# ttl: 7200
+# loop: "{{ (groups['nwas_pas'] | default([])) }}"
+# loop_control:
+# loop_var: host_node
+# when:
+# - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+
+
+# - name: GCP append route for SAP NetWeaver AAS HA
+# google.cloud.gcp_compute_route:
+# state: present
+# project: "{{ sap_vm_provision_gcp_project }}"
+# name: "{{ sap_swpm_aas_instance_hostname }}-vip"
+# dest_range: "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_aas_ip_address | default('192.168.2.13/32')) | regex_replace('/.*', '') }}"
+# next_hop_instance:
+# selfLink: "{{ gcp_vm_info | json_query('results[*].resources[?name==`' + host_node + '`].selfLink') | flatten | join(' ') }}"
+# network:
+# selfLink: "{{ gcp_vpc_info.resources[0].selfLink }}"
+# loop: "{{ (groups['nwas_aas'] | default([])) }}"
+# loop_control:
+# loop_var: host_node
+# register: gcp_vpc_subnet_rt_route_sap_netweaver_aas
+# when:
+# - groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)
+
+# - name: GCP Private DNS Record for SAP NetWeaver AAS HA Virtual Hostname
+# google.cloud.gcp_dns_resource_record_set:
+# state: present
+# project: "{{ sap_vm_provision_gcp_project }}"
+# managed_zone:
+# name: "{{ gcp_pdns_info.resources[0].name }}"
+# dnsName: "{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}."
+# name: "{{ sap_swpm_aas_instance_hostname }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}."
+# target:
+# - "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_aas_ip_address | default('192.168.2.13/32')) | regex_replace('/.*', '') }}"
+# type: A
+# ttl: 7200
+# loop: "{{ (groups['nwas_aas'] | default([])) }}"
+# loop_control:
+# loop_var: host_node
+# when:
+# - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+
+
+# Google Cloud Load Balancing - Internal passthrough Network Load Balancer (NLB for TCP/UDP) and Reserved Static Internal IP Address
+
+# Recommended method is to use an internal passthrough Network Load Balancer (NLB for TCP/UDP) and Reserved Static Internal IP Address, with host health check response using socat or HAProxy
+# Refer to Google Cloud Compute Engine Reserved Static Internal IP Address, https://cloud.google.com/compute/docs/ip-addresses/reserve-static-internal-ip-address
+# Refer to Google Cloud Load Balancing - Internal passthrough Network Load Balancer overview, https://cloud.google.com/load-balancing/docs/internal
+# Refer to SAP HANA guidance 1, https://cloud.google.com/solutions/sap/docs/sap-hana-ha-planning-guide#virtual_ip_address
+# Refer to SAP HANA guidance 2, https://cloud.google.com/solutions/sap/docs/sap-hana-ha-planning-guide#vip_implementation
+# Refer to SAP NetWeaver guidance, https://cloud.google.com/solutions/sap/docs/sap-hana-ha-planning-guide#virtual_ip_address
+
+# Verify the Health Check probe ranges are accessible
+# Compute Engine health checks originate from 35.191.0.0/16 and 130.211.0.0/22 (domain 1e100.net)
+# Manual verification with, for example...
+# tcpdump -i eth0 net 35.191.0.0/16 and dst portrange 55550-55552
+# tcpdump -i eth0 net 130.211.0.0/22 and dst portrange 55550-55552
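+# The TCP health check only needs a listener accepting connections on the probe port
+# of the active node. A minimal sketch, assuming socat is installed and port 55550:
+# socat TCP-LISTEN:55550,fork,reuseaddr /dev/null
+# In the Pacemaker cluster this listener is typically managed as a cluster resource
+# so that it only runs on the node currently holding the VIP.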
+
+- name: Create Google Cloud Compute Engine Reserved Static Internal IP Address for the Virtual IP (VIP) of SAP HANA
+ google.cloud.gcp_compute_address:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ region: "{{ sap_vm_provision_gcp_region }}"
+ subnetwork: { "selfLink": "{{ gcp_vpc_subnet_info.resources[0].selfLink }}" }
+ name: "lb-reserved-static-ip-vip-hana-{{ vip_item_nr }}"
+ address_type: internal
+ address: "{{ vip_item | regex_replace('/.*', '') }}"
+ #network_tier: PREMIUM # An address with type INTERNAL cannot have a network tier
+ purpose: GCE_ENDPOINT # GCE_ENDPOINT is for addresses used by VMs, alias IP ranges, and internal load balancers
+ register: gcp_lb_reserved_address
+ when:
+ - vip_item | length > 0
+ - groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)
+ loop:
+ - "{{ sap_ha_pacemaker_cluster_vip_hana_primary_ip_address | default('192.168.1.90/32') }}"
+ loop_control:
+ index_var: vip_item_nr
+ loop_var: vip_item
+
+- name: Create Google Cloud Compute Engine Health Check (Global) service instance for SAP HANA
+ google.cloud.gcp_compute_health_check:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ name: "lb-probe-hc-vip-hana"
+ type: TCP
+ tcp_health_check:
+ port: 55550
+ proxy_header: NONE
+ check_interval_sec: 10
+ timeout_sec: 10
+ unhealthy_threshold: 2
+ healthy_threshold: 2
+ register: gcp_lb_healthcheck_service
+ when:
+ - groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)
+
+- name: Gather GCP VM information
+ google.cloud.gcp_compute_instance_info:
+ project: "{{ sap_vm_provision_gcp_project }}"
+ zone: "{{ sap_vm_provision_gcp_region_zone }}"
+ filters:
+ - name = {{ host_node }}
+ register: gcp_vm_info
+ loop: "{{ groups_merged_list }}"
+ loop_control:
+ loop_var: host_node
+ when:
+ - groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)
+
+- name: Create Google Cloud Compute Engine Instance Group (Self-Managed/Unmanaged) Primary - for SAP HANA
+ google.cloud.gcp_compute_instance_group:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ name: "lb-instance-group-hana-primary"
+ zone: "{{ gcp_vm_info | json_query('results[*].resources[?name==`' + host_node + '`].zone') | flatten | join(' ') | basename }}"
+ instances:
+ - { "selfLink": "{{ gcp_vm_info | json_query('results[*].resources[?name==`' + host_node + '`].selfLink') | flatten | join(' ') }}" }
+ #named_ports:
+ # - name: http # default, not applicable to internal passthrough NLB, only applicable to proxy NLB
+ # port: 80 # default
+ register: gcp_lb_instance_group1
+ loop: "{{ (groups['hana_primary'] | default([])) }}"
+ loop_control:
+ loop_var: host_node
+ when:
+ - groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)
+
+- name: Create Google Cloud Compute Engine Instance Group (Self-Managed/Unmanaged) Secondary (Failover) - for SAP HANA
+ google.cloud.gcp_compute_instance_group:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ name: "lb-instance-group-hana-secondary"
+ zone: "{{ gcp_vm_info | json_query('results[*].resources[?name==`' + host_node + '`].zone') | flatten | join(' ') | basename }}"
+ instances:
+ - { "selfLink": "{{ gcp_vm_info | json_query('results[*].resources[?name==`' + host_node + '`].selfLink') | flatten | join(' ') }}" }
+ #named_ports:
+ # - name: http # default, not applicable to internal passthrough NLB, only applicable to proxy NLB
+ # port: 80 # default
+ register: gcp_lb_instance_group2
+ loop: "{{ (groups['hana_secondary'] | default([])) }}"
+ loop_control:
+ loop_var: host_node
+ when:
+ - groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)
+
+# Note: Failover Ratio must be 1.0, which enforces failover to the Secondary/Failover Instance Group if any VM in the Backend Service's Primary Instance Group becomes unhealthy
+# No option for --global-health-checks ?
+- name: Create Google Cloud Compute Engine Backend Service (Regional) for the Internal passthrough Network Load Balancer used by SAP HANA
+ google.cloud.gcp_compute_region_backend_service:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ region: "{{ sap_vm_provision_gcp_region }}"
+ name: "lb-backend-service-hana"
+ backends:
+ - group: "{{ gcp_lb_instance_group1.results[0].selfLink }}"
+ balancing_mode: CONNECTION # UTILIZATION , RATE , CONNECTION
+ #failover: false # Should be unset according to GCP for SAP documentation, which is different than set to false
+ - group: "{{ gcp_lb_instance_group2.results[0].selfLink }}"
+ balancing_mode: CONNECTION # UTILIZATION , RATE , CONNECTION
+ failover: true
+ health_checks:
+ - "{{ gcp_lb_healthcheck_service.selfLink }}"
+ load_balancing_scheme: INTERNAL
+ failover_policy:
+ disable_connection_drain_on_failover: true
+ drop_traffic_if_unhealthy: true
+ failover_ratio: 1 # 1.0
+ session_affinity: NONE
+ #timeout_sec: 30 # value ignored for internal passthrough NLB, default 30s to wait for backend before failure - see https://cloud.google.com/load-balancing/docs/backend-service#timeout-setting
+ register: gcp_lb_backend_service_regional
+ when:
+ - groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)
+
+- name: Create Google Cloud Compute Engine Forwarding Rule (aka. Frontend IP and Port) for SAP HANA
+ google.cloud.gcp_compute_forwarding_rule:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ region: "{{ sap_vm_provision_gcp_region }}"
+ name: "lb-fwd-rule-hana-{{ vip_item_nr }}"
+ #target: "{{ target_instance_group_pool }}"
+ ip_address: "{{ vip_item | regex_replace('/.*', '') }}"
+ all_ports: true # For internal network load balancing, allow any ports to be forwarded to the backend service (can not be set if ports are defined)
+ allow_global_access: false # Only for use if access to the SAP HANA Database Server is required from outside of the GCP Region
+ backend_service: { "selfLink": "{{ gcp_lb_backend_service_regional.selfLink }}" } # Mandatory, otherwise error "Invalid value for field 'resource.target'"
+ #backend_service: { "selfLink": "https://www.googleapis.com/compute/v1/projects/{{ sap_vm_provision_gcp_project }}/regions/{{ sap_vm_provision_gcp_region }}/backendServices/lb-backend-service-hana" }
+ subnetwork: { "selfLink": "{{ gcp_vpc_subnet_info.resources[0].selfLink }}" }
+ load_balancing_scheme: INTERNAL
+ network_tier: PREMIUM
+ register: gcp_lb_forwarding_rule
+ when:
+ - vip_item | length > 0
+ - groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)
+ loop:
+ - "{{ sap_ha_pacemaker_cluster_vip_hana_primary_ip_address | default('192.168.1.90/32') }}"
+ loop_control:
+ index_var: vip_item_nr
+ loop_var: vip_item
+
+- name: Get information on Google Cloud Compute Engine (Regional) Backend Service
+ google.cloud.gcp_compute_region_backend_service_info:
+ project: "{{ sap_vm_provision_gcp_project }}"
+ region: "{{ sap_vm_provision_gcp_region }}"
+ filters:
+ - name = "lb-backend-service-hana"
+ register: gcp_info_lb_backend_service_regional
+ when:
+ - groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)
+
+
+- name: Create Google Cloud Compute Engine Reserved Static Internal IP Address for the Virtual IP (VIP) of SAP AnyDB
+ google.cloud.gcp_compute_address:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ region: "{{ sap_vm_provision_gcp_region }}"
+ subnetwork: { "selfLink": "{{ gcp_vpc_subnet_info.resources[0].selfLink }}" }
+ name: "lb-reserved-static-ip-vip-anydb-{{ vip_item_nr }}"
+ address_type: internal
+ address: "{{ vip_item | regex_replace('/.*', '') }}"
+ #network_tier: PREMIUM # An address with type INTERNAL cannot have a network tier
+ purpose: GCE_ENDPOINT # GCE_ENDPOINT is for addresses used by VMs, alias IP ranges, and internal load balancers
+ register: gcp_lb_reserved_address
+ when:
+ - vip_item | length > 0
+ - groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0)
+ loop:
+ - "{{ sap_vm_temp_vip_anydb_primary | default('192.168.1.90/32') }}"
+ loop_control:
+ index_var: vip_item_nr
+ loop_var: vip_item
+
+- name: Create Google Cloud Compute Engine Health Check (Global) service instance for SAP AnyDB
+ google.cloud.gcp_compute_health_check:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ name: "lb-probe-hc-vip-anydb"
+ type: TCP
+ tcp_health_check:
+ port: 55550
+ proxy_header: NONE
+ check_interval_sec: 10
+ timeout_sec: 10
+ unhealthy_threshold: 2
+ healthy_threshold: 2
+ register: gcp_lb_healthcheck_service
+ when:
+ - groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0)
+
+- name: Gather GCP VM information
+ google.cloud.gcp_compute_instance_info:
+ project: "{{ sap_vm_provision_gcp_project }}"
+ zone: "{{ sap_vm_provision_gcp_region_zone }}"
+ filters:
+ - name = {{ host_node }}
+ register: gcp_vm_info
+ loop: "{{ groups_merged_list }}"
+ loop_control:
+ loop_var: host_node
+ when:
+ - groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0)
+
+- name: Create Google Cloud Compute Engine Instance Group (Self-Managed/Unmanaged) Primary - for SAP AnyDB
+ google.cloud.gcp_compute_instance_group:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ name: "lb-instance-group-anydb-primary"
+ zone: "{{ gcp_vm_info | json_query('results[*].resources[?name==`' + host_node + '`].zone') | flatten | join(' ') | basename }}"
+ instances:
+ - { "selfLink": "{{ gcp_vm_info | json_query('results[*].resources[?name==`' + host_node + '`].selfLink') | flatten | join(' ') }}" }
+ #named_ports:
+ # - name: http # default, not applicable to internal passthrough NLB, only applicable to proxy NLB
+ # port: 80 # default
+ register: gcp_lb_instance_group1
+ loop: "{{ (groups['anydb_primary'] | default([])) }}"
+ loop_control:
+ loop_var: host_node
+ when:
+ - groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0)
+
+- name: Create Google Cloud Compute Engine Instance Group (Self-Managed/Unmanaged) Secondary (Failover) - for SAP AnyDB
+ google.cloud.gcp_compute_instance_group:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ name: "lb-instance-group-anydb-secondary"
+ zone: "{{ gcp_vm_info | json_query('results[*].resources[?name==`' + host_node + '`].zone') | flatten | join(' ') | basename }}"
+ instances:
+ - { "selfLink": "{{ gcp_vm_info | json_query('results[*].resources[?name==`' + host_node + '`].selfLink') | flatten | join(' ') }}" }
+ #named_ports:
+ # - name: http # default, not applicable to internal passthrough NLB, only applicable to proxy NLB
+ # port: 80 # default
+ register: gcp_lb_instance_group2
+ loop: "{{ (groups['anydb_secondary'] | default([])) }}"
+ loop_control:
+ loop_var: host_node
+ when:
+ - groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0)
+
+# Note: Failover Ratio must be 1.0, which enforces failover to the Secondary/Failover Instance Group if any VM in the Backend Service's Primary Instance Group becomes unhealthy
+# No option for --global-health-checks ?
+- name: Create Google Cloud Compute Engine Backend Service (Regional) for the Internal passthrough Network Load Balancer used by SAP AnyDB
+ google.cloud.gcp_compute_region_backend_service:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ region: "{{ sap_vm_provision_gcp_region }}"
+ name: "lb-backend-service-anydb"
+ backends:
+ - group: "{{ gcp_lb_instance_group1.results[0].selfLink }}"
+ balancing_mode: CONNECTION # UTILIZATION , RATE , CONNECTION
+ #failover: false # Should be unset according to GCP for SAP documentation, which is different than set to false
+ - group: "{{ gcp_lb_instance_group2.results[0].selfLink }}"
+ balancing_mode: CONNECTION # UTILIZATION , RATE , CONNECTION
+ failover: true
+ health_checks:
+ - "{{ gcp_lb_healthcheck_service.selfLink }}"
+ load_balancing_scheme: INTERNAL
+ failover_policy:
+ disable_connection_drain_on_failover: true
+ drop_traffic_if_unhealthy: true
+ failover_ratio: 1 # 1.0
+ session_affinity: NONE
+ #timeout_sec: 30 # value ignored for internal passthrough NLB, default 30s to wait for backend before failure - see https://cloud.google.com/load-balancing/docs/backend-service#timeout-setting
+ register: gcp_lb_backend_service_regional
+ when:
+ - groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0)
+
+- name: Create Google Cloud Compute Engine Forwarding Rule (aka. Frontend IP and Port) for SAP AnyDB
+ google.cloud.gcp_compute_forwarding_rule:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ region: "{{ sap_vm_provision_gcp_region }}"
+ name: "lb-fwd-rule-anydb-{{ vip_item_nr }}"
+ #target: "{{ target_instance_group_pool }}"
+ ip_address: "{{ vip_item | regex_replace('/.*', '') }}"
+ all_ports: true # For internal network load balancing, allow any ports to be forwarded to the backend service (can not be set if ports are defined)
+ allow_global_access: false # Only for use if access to the SAP AnyDB Database Server is required from outside of the GCP Region
+ backend_service: { "selfLink": "{{ gcp_lb_backend_service_regional.selfLink }}" } # Mandatory, otherwise error "Invalid value for field 'resource.target'"
+ #backend_service: { "selfLink": "https://www.googleapis.com/compute/v1/projects/{{ sap_vm_provision_gcp_project }}/regions/{{ sap_vm_provision_gcp_region }}/backendServices/lb-backend-service-anydb" }
+ subnetwork: { "selfLink": "{{ gcp_vpc_subnet_info.resources[0].selfLink }}" }
+ load_balancing_scheme: INTERNAL
+ network_tier: PREMIUM
+ register: gcp_lb_forwarding_rule
+ when:
+ - vip_item | length > 0
+ - groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0)
+ loop:
+ - "{{ sap_vm_temp_vip_anydb_primary | default('192.168.1.90/32') }}"
+ loop_control:
+ index_var: vip_item_nr
+ loop_var: vip_item
+
+- name: Get information on Google Cloud Compute Engine (Regional) Backend Service
+ google.cloud.gcp_compute_region_backend_service_info:
+ project: "{{ sap_vm_provision_gcp_project }}"
+ region: "{{ sap_vm_provision_gcp_region }}"
+ filters:
+ - name = "lb-backend-service-anydb"
+ register: gcp_info_lb_backend_service_regional
+ when:
+ - groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0)
+
+
+- name: Create Google Cloud Compute Engine Reserved Static Internal IP Address for the Virtual IP (VIP) of SAP NetWeaver ASCS
+ google.cloud.gcp_compute_address:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ region: "{{ sap_vm_provision_gcp_region }}"
+ subnetwork: { "selfLink": "{{ gcp_vpc_subnet_info.resources[0].selfLink }}" }
+ name: "lb-reserved-static-ip-vip-nwas-ascs"
+ address_type: internal
+ address: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | regex_replace('/.*', '') }}"
+ #network_tier: PREMIUM # An address with type INTERNAL cannot have a network tier
+ purpose: GCE_ENDPOINT # GCE_ENDPOINT is for addresses used by VMs, alias IP ranges, and internal load balancers
+ register: gcp_lb_reserved_address
+ when:
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+
+- name: Create Google Cloud Compute Engine Reserved Static Internal IP Address for the Virtual IP (VIP) of SAP NetWeaver ERS
+ google.cloud.gcp_compute_address:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ region: "{{ sap_vm_provision_gcp_region }}"
+ subnetwork: { "selfLink": "{{ gcp_vpc_subnet_info.resources[0].selfLink }}" }
+ name: "lb-reserved-static-ip-vip-nwas-ers"
+ address_type: internal
+ address: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | regex_replace('/.*', '') }}"
+ #network_tier: PREMIUM # An address with type INTERNAL cannot have a network tier
+ purpose: GCE_ENDPOINT # GCE_ENDPOINT is for addresses used by VMs, alias IP ranges, and internal load balancers
+ register: gcp_lb_reserved_address
+ when:
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+
+- name: Create Google Cloud Compute Engine Health Check (Global) service instance for SAP NetWeaver ASCS
+ google.cloud.gcp_compute_health_check:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ name: "lb-probe-hc-vip-nwas-ascs"
+ type: TCP
+ tcp_health_check:
+ port: 55551
+ proxy_header: NONE
+ check_interval_sec: 10
+ timeout_sec: 10
+ unhealthy_threshold: 2
+ healthy_threshold: 2
+ register: gcp_lb_healthcheck_service_ascs
+ when:
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+
+- name: Create Google Cloud Compute Engine Health Check (Global) service instance for SAP NetWeaver ERS
+ google.cloud.gcp_compute_health_check:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ name: "lb-probe-hc-vip-nwas-ers"
+ type: TCP
+ tcp_health_check:
+ port: 55552
+ proxy_header: NONE
+ check_interval_sec: 10
+ timeout_sec: 10
+ unhealthy_threshold: 2
+ healthy_threshold: 2
+ register: gcp_lb_healthcheck_service_ers
+ when:
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+
+- name: Gather GCP VM information
+ google.cloud.gcp_compute_instance_info:
+ project: "{{ sap_vm_provision_gcp_project }}"
+ zone: "{{ sap_vm_provision_gcp_region_zone }}"
+ filters:
+ - name = {{ host_node }}
+ register: gcp_vm_info
+ loop: "{{ groups_merged_list }}"
+ loop_control:
+ loop_var: host_node
+ when:
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+
+- name: Create Google Cloud Compute Engine Instance Group (Self-Managed/Unmanaged) Primary - for SAP NetWeaver ASCS
+ google.cloud.gcp_compute_instance_group:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ name: "lb-instance-group-nwas-ascs"
+ zone: "{{ gcp_vm_info | json_query('results[*].resources[?name==`' + host_node + '`].zone') | flatten | join(' ') | basename }}"
+ instances:
+ - { "selfLink": "{{ gcp_vm_info | json_query('results[*].resources[?name==`' + host_node + '`].selfLink') | flatten | join(' ') }}" }
+ #named_ports:
+ # - name: http # default, not applicable to internal passthrough NLB, only applicable to proxy NLB
+ # port: 80 # default
+ register: gcp_lb_instance_group1
+ loop: "{{ (groups['nwas_ascs'] | default([])) }}"
+ loop_control:
+ loop_var: host_node
+ when:
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+
+- name: Create Google Cloud Compute Engine Instance Group (Self-Managed/Unmanaged) Secondary (Failover) - for SAP NetWeaver ERS
+ google.cloud.gcp_compute_instance_group:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ name: "lb-instance-group-nwas-ers"
+ zone: "{{ gcp_vm_info | json_query('results[*].resources[?name==`' + host_node + '`].zone') | flatten | join(' ') | basename }}"
+ instances:
+ - { "selfLink": "{{ gcp_vm_info | json_query('results[*].resources[?name==`' + host_node + '`].selfLink') | flatten | join(' ') }}" }
+ #named_ports:
+ # - name: http # default, not applicable to internal passthrough NLB, only applicable to proxy NLB
+ # port: 80 # default
+ register: gcp_lb_instance_group2
+ loop: "{{ (groups['nwas_ers'] | default([])) }}"
+ loop_control:
+ loop_var: host_node
+ when:
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+
+# Note: Failover Ratio must be 1.0, which enforces failover to the Secondary/Failover Instance Group if any VM in the Backend Service's Primary Instance Group becomes unhealthy
+# No option for --global-health-checks ?
+- name: Create Google Cloud Compute Engine Backend Service (Regional) for the Internal passthrough Network Load Balancer used by SAP NetWeaver ASCS
+ google.cloud.gcp_compute_region_backend_service:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ region: "{{ sap_vm_provision_gcp_region }}"
+ name: "lb-backend-service-nwas-ascs"
+ backends:
+ - group: "{{ gcp_lb_instance_group1.results[0].selfLink }}"
+ balancing_mode: CONNECTION # UTILIZATION , RATE , CONNECTION
+ #failover: false # Should be unset according to GCP for SAP documentation, which is different than set to false
+ - group: "{{ gcp_lb_instance_group2.results[0].selfLink }}"
+ balancing_mode: CONNECTION # UTILIZATION , RATE , CONNECTION
+ failover: true
+ health_checks:
+ - "{{ gcp_lb_healthcheck_service_ascs.selfLink }}"
+ load_balancing_scheme: INTERNAL
+ failover_policy:
+ disable_connection_drain_on_failover: true
+ drop_traffic_if_unhealthy: true
+ failover_ratio: 1 # 1.0
+ session_affinity: NONE
+ #timeout_sec: 30 # value ignored for internal passthrough NLB, default 30s to wait for backend before failure - see https://cloud.google.com/load-balancing/docs/backend-service#timeout-setting
+ register: gcp_lb_backend_service_regional_ascs
+ when:
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+
+# Note: Failover Ratio must be 1.0, which enforces failover to the Secondary/Failover Instance Group if any VM in the Backend Service's Primary Instance Group becomes unhealthy
+# No option for --global-health-checks ?
+- name: Create Google Cloud Compute Engine Backend Service (Regional) for the Internal passthrough Network Load Balancer used by SAP NetWeaver ERS
+ google.cloud.gcp_compute_region_backend_service:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ region: "{{ sap_vm_provision_gcp_region }}"
+ name: "lb-backend-service-nwas-ers"
+ backends:
+ - group: "{{ gcp_lb_instance_group2.results[0].selfLink }}"
+ balancing_mode: CONNECTION # UTILIZATION , RATE , CONNECTION
+ #failover: false # Should be unset according to GCP for SAP documentation, which is different than set to false
+ - group: "{{ gcp_lb_instance_group1.results[0].selfLink }}"
+ balancing_mode: CONNECTION # UTILIZATION , RATE , CONNECTION
+ failover: true
+ health_checks:
+ - "{{ gcp_lb_healthcheck_service_ers.selfLink }}"
+ load_balancing_scheme: INTERNAL
+ failover_policy:
+ disable_connection_drain_on_failover: true
+ drop_traffic_if_unhealthy: true
+ failover_ratio: 1 # 1.0
+ session_affinity: NONE
+ #timeout_sec: 30 # value ignored for internal passthrough NLB, default 30s to wait for backend before failure - see https://cloud.google.com/load-balancing/docs/backend-service#timeout-setting
+ register: gcp_lb_backend_service_regional_ers
+ when:
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+
+- name: Create Google Cloud Compute Engine Forwarding Rule (aka. Frontend IP and Port) for SAP NetWeaver ASCS
+ google.cloud.gcp_compute_forwarding_rule:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ region: "{{ sap_vm_provision_gcp_region }}"
+ name: "lb-fwd-rule-nwas-ascs"
+ #target: "{{ target_instance_group_pool }}"
+ ip_address: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | regex_replace('/.*', '') }}"
+ all_ports: true # For internal network load balancing, allow any ports to be forwarded to the backend service (can not be set if ports are defined)
+    allow_global_access: false # Only for use if access to the SAP NetWeaver ASCS instance is required from outside of the GCP Region
+ backend_service: { "selfLink": "{{ gcp_lb_backend_service_regional_ascs.selfLink }}" } # Mandatory, otherwise error "Invalid value for field 'resource.target'"
+ #backend_service: { "selfLink": "https://www.googleapis.com/compute/v1/projects/{{ sap_vm_provision_gcp_project }}/regions/{{ sap_vm_provision_gcp_region }}/backendServices/lb-backend-service-nwas-ascs" }
+ subnetwork: { "selfLink": "{{ gcp_vpc_subnet_info.resources[0].selfLink }}" }
+ load_balancing_scheme: INTERNAL
+ network_tier: PREMIUM
+ register: gcp_lb_forwarding_rule
+ when:
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+
+- name: Create Google Cloud Compute Engine Forwarding Rule (aka. Frontend IP and Port) for SAP NetWeaver ERS
+ google.cloud.gcp_compute_forwarding_rule:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ region: "{{ sap_vm_provision_gcp_region }}"
+ name: "lb-fwd-rule-nwas-ascs"
+ #target: "{{ target_instance_group_pool }}"
+ ip_address: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | regex_replace('/.*', '') }}"
+ all_ports: true # For internal network load balancing, allow any ports to be forwarded to the backend service (can not be set if ports are defined)
+ allow_global_access: false # Only for use if access to the SAP NetWeaver Database Server is required from outside of the GCP Region
+ backend_service: { "selfLink": "{{ gcp_lb_backend_service_regional_ascs.selfLink }}" } # Mandatory, otherwise error "Invalid value for field 'resource.target'"
+ #backend_service: { "selfLink": "https://www.googleapis.com/compute/v1/projects/{{ sap_vm_provision_gcp_project }}/regions/{{ sap_vm_provision_gcp_region }}/backendServices/lb-backend-service-nwas-ascs" }
+ subnetwork: { "selfLink": "{{ gcp_vpc_subnet_info.resources[0].selfLink }}" }
+ load_balancing_scheme: INTERNAL
+ network_tier: PREMIUM
+ register: gcp_lb_forwarding_rule
+ when:
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+
+
+- name: Get information on Google Cloud Compute Engine (Regional) Backend Service for SAP NetWeaver ASCS
+ google.cloud.gcp_compute_region_backend_service_info:
+ project: "{{ sap_vm_provision_gcp_project }}"
+ region: "{{ sap_vm_provision_gcp_region }}"
+ filters:
+ - name = "lb-backend-service-nwas-ascs"
+ register: gcp_info_lb_backend_service_regional
+ when:
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+
+- name: Get information on Google Cloud Compute Engine (Regional) Backend Service for SAP NetWeaver ERS
+ google.cloud.gcp_compute_region_backend_service_info:
+ project: "{{ sap_vm_provision_gcp_project }}"
+ region: "{{ sap_vm_provision_gcp_region }}"
+ filters:
+ - name = "lb-backend-service-nwas-ers"
+ register: gcp_info_lb_backend_service_regional
+ when:
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/post_deployment_execute.yml b/roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/post_deployment_execute.yml
new file mode 100644
index 0000000..6941662
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/gcp_ce_vm/post_deployment_execute.yml
@@ -0,0 +1,110 @@
+---
+
+- name: Ansible Task block for amending Load Balancer health check ports for High Availability - after provisioning GCP CE VMs
+ delegate_to: localhost
+ run_once: true
+ environment:
+ GCP_AUTH_KIND: "serviceaccount"
+ GCP_SERVICE_ACCOUNT_FILE: "{{ sap_vm_provision_gcp_credentials_json }}"
+ when:
+    - sap_ha_pacemaker_cluster_gcp_project is defined
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+ block:
+
+ - name: Inherit variable - set fact for Google Cloud Compute Engine Health Check (Global) - SAP HANA
+ ansible.builtin.set_fact:
+ gcp_lb_healthcheck_hana: "{{ sap_ha_pacemaker_cluster_healthcheck_hana_primary_port | default('') }}"
+ when: sap_ha_pacemaker_cluster_healthcheck_hana_primary_port is defined
+
+ - name: Inherit variable - set fact for Google Cloud Compute Engine Health Check (Global) - SAP NWAS ASCS
+ ansible.builtin.set_fact:
+ gcp_lb_healthcheck_nwas_ascs: "{{ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ascs_port | default('') }}"
+ when: sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ascs_port is defined
+
+ - name: Inherit variable - set fact for Google Cloud Compute Engine Health Check (Global) - SAP NWAS ERS
+ ansible.builtin.set_fact:
+ gcp_lb_healthcheck_nwas_ers: "{{ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ers_port | default('') }}"
+ when: sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ers_port is defined
+
+ - name: Default variable - Set fact for Google Cloud Compute Engine Health Check (Global) - SAP HANA
+ ansible.builtin.set_fact:
+ gcp_lb_healthcheck_hana: "{{ ('620' + (sap_system_hana_db_instance_nr | default('')) | string) | int }}"
+ when: not sap_ha_pacemaker_cluster_healthcheck_hana_primary_port is defined
+
+ - name: Default variable - Set fact for Google Cloud Compute Engine Health Check (Global) - SAP NWAS ASCS
+ ansible.builtin.set_fact:
+ gcp_lb_healthcheck_nwas_ascs: "{{ ('620' + (sap_system_nwas_abap_ascs_instance_nr | default('')) | string) | int }}"
+ when: not sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ascs_port is defined
+
+ - name: Default variable - Set fact for Google Cloud Compute Engine Health Check (Global) - SAP NWAS ERS
+ ansible.builtin.set_fact:
+ gcp_lb_healthcheck_nwas_ers: "{{ ('620' + (sap_system_nwas_abap_ers_instance_nr | default('')) | string) | int }}"
+ when: not sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ers_port is defined
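+  # Worked example for the defaults above: with instance number '90' the expression
+  # renders '620' + '90' as string '62090', cast to integer 62090 - one distinct
+  # health check probe port per SAP instance.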
+
+ - name: Create Google Cloud Compute Engine Health Check (Global) service instance for SAP HANA
+ google.cloud.gcp_compute_health_check:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ name: "lb-probe-hc-vip-hana"
+ type: TCP
+ tcp_health_check:
+ port: "{{ gcp_lb_healthcheck_hana }}"
+ proxy_header: NONE
+ check_interval_sec: 10
+ timeout_sec: 10
+ unhealthy_threshold: 2
+ healthy_threshold: 2
+ register: gcp_lb_healthcheck_service
+ when:
+ - groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)
+
+ - name: Create Google Cloud Compute Engine Health Check (Global) service instance for SAP AnyDB
+ google.cloud.gcp_compute_health_check:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ name: "lb-probe-hc-vip-anydb"
+ type: TCP
+ tcp_health_check:
+ port: 62700
+ proxy_header: NONE
+ check_interval_sec: 10
+ timeout_sec: 10
+ unhealthy_threshold: 2
+ healthy_threshold: 2
+ register: gcp_lb_healthcheck_service
+ when:
+ - groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0)
+
+ - name: Create Google Cloud Compute Engine Health Check (Global) service instance for SAP NetWeaver ASCS
+ google.cloud.gcp_compute_health_check:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ name: "lb-probe-hc-vip-nwas-ascs"
+ type: TCP
+ tcp_health_check:
+ port: "{{ gcp_lb_healthcheck_nwas_ascs }}"
+ proxy_header: NONE
+ check_interval_sec: 10
+ timeout_sec: 10
+ unhealthy_threshold: 2
+ healthy_threshold: 2
+ register: gcp_lb_healthcheck_service_ascs
+ when:
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+
+ - name: Create Google Cloud Compute Engine Health Check (Global) service instance for SAP NetWeaver ERS
+ google.cloud.gcp_compute_health_check:
+ state: present
+ project: "{{ sap_vm_provision_gcp_project }}"
+ name: "lb-probe-hc-vip-nwas-ers"
+ type: TCP
+ tcp_health_check:
+ port: "{{ gcp_lb_healthcheck_nwas_ers }}"
+ proxy_header: NONE
+ check_interval_sec: 10
+ timeout_sec: 10
+ unhealthy_threshold: 2
+ healthy_threshold: 2
+ register: gcp_lb_healthcheck_service_ers
+ when:
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_powervs/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_powervs/execute_main.yml
new file mode 100644
index 0000000..f24a27b
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_powervs/execute_main.yml
@@ -0,0 +1,261 @@
+---
+
+- name: Set fact for IBM Power VS location to the colocated IBM Cloud Availability Zone (VPC)
+ ansible.builtin.set_fact:
+ list_ibmcloud_powervs_location_to_ibmcloud_availability_zone:
+ dal12: "us-south-2"
+ us-south: "us-south-3" # naming of IBM Power VS location 'us-south' was previous naming convention, would otherwise be 'DAL13'
+ us-east: "us-east-1" # naming of IBM Power VS location 'us-east' was previous naming convention, would otherwise be 'WDC04'
+ # wdc06: "us-east-2" # No Cloud Connection available at this location
+ sao01: "br-sao-1"
+ tor01: "ca-tor-1"
+ eu-de-1: "eu-de-2" # naming of IBM Power VS location 'eu-de-1' was previous naming convention, would otherwise be 'FRA04'
+ eu-de-2: "eu-de-3" # naming of IBM Power VS location 'eu-de-2' was previous naming convention, would otherwise be 'FRA05'
+ lon04: "eu-gb-1"
+ lon06: "eu-gb-3"
+ syd04: "au-syd-2"
+ syd05: "au-syd-3"
+ tok04: "jp-tok-2"
+ osa21: "jp-osa-1"
+
+# IBM Cloud Regional API Endpoint = https://<>.cloud.ibm.com/
+# IBM Power VS (on IBM Cloud) Regional API Endpoint = https://<>.power-iaas.cloud.ibm.com/
+- name: Set fact for IBM Power VS location to the IBM Power VS Region API Endpoints
+ ansible.builtin.set_fact:
+ list_ibmcloud_powervs_location_to_powervs_region:
+ dal12: "us-south"
+ us-south: "us-south"
+ us-east: "us-east"
+ # wdc06: "us-east" # no Cloud Connection available at this location
+ sao01: "sao"
+ tor01: "tor"
+ eu-de-1: "eu-de"
+ eu-de-2: "eu-de"
+ lon04: "lon"
+ lon06: "lon"
+ syd04: "syd"
+ syd05: "syd"
+ tok04: "tok"
+ osa21: "osa"
+
+- name: Set fact for IBM Power Infrastructure location to the colocated IBM Cloud Region
+ ansible.builtin.set_fact:
+ sap_vm_provision_ibmcloud_region: "{{ list_ibmcloud_powervs_location_to_ibmcloud_availability_zone[sap_vm_provision_ibmcloud_powervs_location] | regex_replace('-[0-9]', '') }}"
+ sap_vm_provision_ibmcloud_powervs_region: "{{ list_ibmcloud_powervs_location_to_powervs_region[sap_vm_provision_ibmcloud_powervs_location] }}"
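+# Worked example: location 'lon06' maps to IBM Cloud Availability Zone 'eu-gb-3',
+# which regex_replace('-[0-9]', '') reduces to IBM Cloud Region 'eu-gb', while the
+# IBM Power VS Region API Endpoint prefix resolves to 'lon'.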
+
+- name: Ansible Task block for looped provisioning of IBM Power Virtual Servers on IBM Cloud
+ environment:
+ IC_API_KEY: "{{ sap_vm_provision_ibmcloud_api_key }}"
+ IC_REGION: "{{ sap_vm_provision_ibmcloud_region }}"
+ IC_ZONE: "{{ sap_vm_provision_ibmcloud_powervs_location }}" # Required only for IBM Power VS, to set IBM Power VS location
+ block:
+
+ - name: Identify Resource Group info
+ register: register_ibmcloud_resource_group
+ ibm.cloudcollection.ibm_resource_group_info:
+ name: "{{ sap_vm_provision_ibmcloud_resource_group_name }}"
+
+ - name: Identify IBM Power Infrastructure Workspace
+ register: register_ibmcloud_power_iaas_workspace_service_instance
+ ibm.cloudcollection.ibm_resource_instance_info:
+ resource_group_id: "{{ register_ibmcloud_resource_group.resource.id }}"
+ location: "{{ sap_vm_provision_ibmcloud_powervs_location }}"
+ service: power-iaas
+ name: "{{ sap_vm_provision_ibmcloud_powervs_workspace_name }}"
+
+ - name: Identify pre-loaded Power Infrastructure SSH Public Key info
+ register: register_ibmcloud_pi_ssh_public_key
+ environment:
+ IC_REGION: "{{ sap_vm_provision_ibmcloud_powervs_region }}"
+ ibm.cloudcollection.ibm_pi_key_info:
+ pi_cloud_instance_id: "{{ register_ibmcloud_power_iaas_workspace_service_instance.resource.guid }}" # must be GUID, not CRN
+ pi_key_name: "{{ sap_vm_provision_ibmcloud_powervs_key_pair_name_ssh_host_public_key }}"
+
+ - name: Identify Power Infrastructure VLAN Subnet info
+ register: register_ibmcloud_pi_subnet
+ environment:
+ IC_REGION: "{{ sap_vm_provision_ibmcloud_powervs_region }}"
+ ibm.cloudcollection.ibm_pi_network_info:
+ pi_cloud_instance_id: "{{ register_ibmcloud_power_iaas_workspace_service_instance.resource.guid }}" # must be GUID, not CRN
+ pi_network_name: "{{ sap_vm_provision_ibmcloud_powervs_vlan_subnet_name }}"
+
+ - name: Identify Power Infrastructure OS Image list
+ register: register_ibmcloud_pi_os_image_list
+ environment:
+ IC_REGION: "{{ sap_vm_provision_ibmcloud_powervs_region }}"
+ ibm.cloudcollection.ibm_pi_catalog_images_info:
+ pi_cloud_instance_id: "{{ register_ibmcloud_power_iaas_workspace_service_instance.resource.guid }}" # must be GUID, not CRN
+
+ - name: Identify Private DNS instance
+ register: register_ibmcloud_pdns_service_instance
+ ibm.cloudcollection.ibm_resource_instance_info:
+ resource_group_id: "{{ register_ibmcloud_resource_group.resource.id }}"
+ location: global
+ service: dns-svcs
+ name: "{{ sap_vm_provision_ibmcloud_private_dns_instance_name }}"
+
+ - name: Identify Private DNS Zone info
+ register: register_ibmcloud_pdns
+ ibm.cloudcollection.ibm_dns_zones_info:
+ instance_id: "{{ register_ibmcloud_pdns_service_instance.resource.guid }}"
+
+ - name: Set fact for latest IBM Power Infrastructure OS Catalog Stock Image
+ ansible.builtin.set_fact:
+ register_ibmcloud_pi_os_image_selected: "{{ register_ibmcloud_pi_os_image_list.resource.images | selectattr('name', 'search', lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_os_image_dictionary')[sap_vm_provision_ibmcloud_powervs_host_os_image]) | sort(reverse=True,case_sensitive=False,attribute='name') | first }}"
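+  # e.g. with a host OS image dictionary value of 'rhel-9' (hypothetical key), the
+  # newest matching catalog image is selected by sorting the matches on name, descending.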
+
+ - name: Create Boot Image from IBM Power Infrastructure OS Catalog Stock Image
+ register: register_provisioned_os_image
+ environment:
+ IC_REGION: "{{ sap_vm_provision_ibmcloud_powervs_region }}"
+ ibm.cloudcollection.ibm_pi_image:
+ pi_cloud_instance_id: "{{ register_ibmcloud_power_iaas_workspace_service_instance.resource.guid }}" # must be GUID, not CRN
+ pi_image_id: "{{ register_ibmcloud_pi_os_image_selected.image_id }}"
+ pi_image_name: "{{ sap_vm_provision_ibmcloud_powervs_host_os_image }}-boot"
+ failed_when: not register_provisioned_os_image.rc == 0 and not 'already exists' in register_provisioned_os_image.stderr
+ run_once: true
+
+ # Use check to avoid idempotency issues with legacy ibm.cloudcollection Ansible Collection (until ibm.cloud Ansible Collection is ready)
+ - name: Check for existing Boot Image imported already from IBM Power Infrastructure OS Catalog Stock Image
+ register: register_existing_os_image
+ environment:
+ IC_REGION: "{{ sap_vm_provision_ibmcloud_powervs_region }}"
+ ibm.cloudcollection.ibm_pi_image_info:
+ pi_cloud_instance_id: "{{ register_ibmcloud_power_iaas_workspace_service_instance.resource.guid }}" # must be GUID, not CRN
+ pi_image_name: "{{ register_ibmcloud_pi_os_image_selected.name }}"
+ run_once: true
+
+ - name: Set fact to hold loop variables from include_tasks
+ ansible.builtin.set_fact:
+ register_provisioned_host_all: []
+
+ - name: Provision IBM Power Virtual Server hosts on IBM Cloud
+ register: register_provisioned_hosts
+ ansible.builtin.include_tasks:
+ file: "{{ 'platform_' + sap_vm_provision_iac_type }}/{{ sap_vm_provision_iac_platform }}/execute_provision.yml"
+ apply:
+ environment:
+ IC_API_KEY: "{{ sap_vm_provision_ibmcloud_api_key }}"
+ IC_REGION: "{{ sap_vm_provision_ibmcloud_powervs_region }}"
+ IC_ZONE: "{{ sap_vm_provision_ibmcloud_powervs_location }}" # Required only for IBM Power VS, to set IBM Power VS location
+
+ - name: Add hosts provisioned to the Ansible Inventory
+ register: register_add_hosts
+ ansible.builtin.add_host:
+ name: "{{ add_item[0].host_node }}"
+ groups: "{{ add_item[0].sap_system_type + '_' if (add_item[0].sap_system_type != '') }}{{ add_item[0].sap_host_type }}"
+ ansible_host: "{{ add_item[0].resource.addresses[0].ip }}"
+ ansible_user: "root"
+ ansible_ssh_private_key_file: "{{ sap_vm_provision_ssh_host_private_key_file_path }}"
+ ansible_ssh_common_args: -o ConnectTimeout=180 -o ControlMaster=auto -o ControlPersist=3600s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no -o ProxyCommand='ssh -W %h:%p {{ sap_vm_provision_bastion_user }}@{{ sap_vm_provision_bastion_public_ip }} -p {{ sap_vm_provision_bastion_ssh_port }} -i {{ sap_vm_provision_ssh_bastion_private_key_file_path }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
+ loop: "{{ ansible_play_hosts | map('extract', hostvars, 'register_provisioned_host_all') }}"
+ loop_control:
+ label: "{{ add_item[0].host_node }}"
+ loop_var: add_item
+
+
+# Cannot override any variables from extravars input, see https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#understanding-variable-precedence
+# Ensure no default value exists for any prompted variable before execution of Ansible Playbook
+
+ - name: Set fact to hold all inventory hosts in all groups
+ ansible.builtin.set_fact:
+ groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] ] | flatten | select() }}"
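+  # e.g. with only groups hana_primary ['h01'] and nwas_ascs ['a01'] populated, this
+  # renders ['h01', 'a01']; undefined groups default to [] and select() drops empty items.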
+
+ - name: Set Ansible Vars
+ register: register_set_ansible_vars
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars.yml
+
+ - name: IBM Cloud Private DNS Record for hosts
+ register: register_ibmcloud_pdns_record
+ ibm.cloudcollection.ibm_dns_resource_record:
+ instance_id: "{{ register_ibmcloud_pdns_service_instance.resource.guid }}"
+ zone_id: "{{ (register_ibmcloud_pdns.resource.dns_zones | selectattr('name', '==', sap_vm_provision_dns_root_domain) | first).zone_id }}"
+ name: "{{ inventory_hostname }}.{{ hostvars[inventory_hostname].sap_vm_provision_dns_root_domain }}" # Host FQDN
+ rdata: "{{ hostvars[inventory_hostname].ansible_host }}" # IP Address
+ type: A
+ ttl: 7200
+ failed_when: not register_ibmcloud_pdns_record.rc == 0 and not 'The record already exists' in register_ibmcloud_pdns_record.stderr
+
+ # - ansible.builtin.debug:
+ # var: register_add_hosts.results
+
+- name: Ansible Task block to execute on target inventory hosts
+ delegate_to: "{{ inventory_hostname }}"
+ block:
+
+ # Required to collect the remote host's facts for further processing
+ # in the following steps
+ - name: Gather host facts
+ ansible.builtin.setup:
+
+ # Must be set to short hostname,
+ # so that command 'hostname' and 'hostname -s' return the short hostname only;
+ # otherwise may cause error with SAP SWPM using name.domain.com.domain.com
+ - name: Change system hostname (must be set to short name and not FQDN, as required by SAP)
+ ansible.builtin.hostname:
+ name: "{{ inventory_hostname_short }}"
+
+ - name: Set /etc/hosts
+ register: register_etc_hosts_file
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts.yml
+
+ - name: Set /etc/hosts for HA
+ register: register_etc_hosts_file_ha
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_ha.yml
+ when:
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+
+ - name: Set /etc/hosts for Scale-Out
+ register: register_etc_hosts_file_scaleout
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_scaleout.yml
+ when:
+ - (groups["hana_primary"] is defined and (groups["hana_primary"] | length>0)) and (sap_hana_scaleout_active_coordinator is defined or sap_hana_scaleout_active_worker is defined or sap_hana_scaleout_standby is defined)
+
+ - name: Set vars for sap_storage_setup Ansible Role
+ register: register_ansible_vars_storage
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars_storage.yml
+
+ - name: Append IBM Cloud Private DNS to /etc/resolv.conf
+ ansible.builtin.lineinfile:
+ path: /etc/resolv.conf
+ line: nameserver 161.26.0.10
+
+ - name: Register Web Forward Proxy
+ ansible.builtin.include_tasks:
+ file: common/register_proxy.yml
+
+ # Extract the generated command string and activation key from /usr/share, then execute script from /usr/local/bin
+ # Use nohup to ensure completion, wait 2 minutes
+ # Verify with /var/log/rhsm/rhsm.log if necessary
+ - name: Execute OS Activation Script for IBM Power Virtual Server - RHEL
+ ansible.builtin.shell: |
+      set -o pipefail
+      web_proxy_ip_port="{{ sap_vm_provision_proxy_web_forward_proxy_ip }}"
+      if [[ ! -f /usr/share/powervs-fls/powervs-fls-readme.md ]]; then echo "File does not exist" && exit 1; fi
+      activation_script_exec=$(cat /usr/share/powervs-fls/powervs-fls-readme.md | grep networklayer.com | sed "s|Private.proxy.IP.address:3128|$web_proxy_ip_port|" | sed 's|. ||')
+      nohup $activation_script_exec >/dev/null 2>&1
+      sleep 120
+ when: ansible_os_family == "RedHat"
+
+ # Extract the generated command string and activation key from /usr/share, then execute script from /usr/local/bin
+ # Use nohup to ensure completion, wait 2 minutes
+ - name: Execute OS Activation Script for IBM Power Virtual Server - SLES
+ ansible.builtin.shell: |
+      set -o pipefail
+      web_proxy_ip_port="{{ sap_vm_provision_proxy_web_forward_proxy_ip }}"
+      if [[ ! -f /usr/share/powervs-fls/powervs-fls-readme.md ]]; then echo "File does not exist" && exit 1; fi
+      activation_script_exec=$(cat /usr/share/powervs-fls/powervs-fls-readme.md | grep networklayer.com | sed 's|. ||' | sed "s|$|$web_proxy_ip_port|")
+      nohup $activation_script_exec >/dev/null 2>&1
+      sleep 60
+      SUSEConnect --product PackageHub/{{ ansible_distribution_version }}/ppc64le
+ when: ansible_os_family == "Suse"
+
+  # Ensure lock to RHEL major.minor version
+ # Lock using subscription-manager release --set or /var/lib/rhsm/cache/releasever.json, alternatively using /etc/yum/vars/releasever or /etc/dnf/vars/releasever
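+  # A minimal sketch of such a lock (commented out; assumes the host was registered
+  # by the activation script above):
+  # - name: Lock RHEL release to the running major.minor version
+  #   ansible.builtin.command:
+  #     cmd: subscription-manager release --set={{ ansible_distribution_version }}
+  #   when: ansible_os_family == "RedHat"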
+
+ - name: Set facts on each host - HA/DR
+ ansible.builtin.set_fact:
+ sap_ha_pacemaker_cluster_vip_client_interface: "{{ ansible_default_ipv4.interface }}" # Assume IBM Power Virtual Server vNIC is set as default (e.g. env2)
+ when: sap_ha_pacemaker_cluster_ibmcloud_api_key is defined
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_powervs/execute_provision.yml b/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_powervs/execute_provision.yml
new file mode 100644
index 0000000..6bf99c5
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_powervs/execute_provision.yml
@@ -0,0 +1,205 @@
+---
+# The tasks in this file are executed in a loop over the defined hosts
+
+# When SAP HANA Scale-Out is used, if host name is not in original specifications then strip suffix node number from host name
+- name: Set fact when performing SAP HANA Scale-Out
+ ansible.builtin.set_fact:
+ scaleout_origin_host_spec: "{{ inventory_hostname | regex_replace('^(.+?)\\d*$', '\\1') }}"
+ when:
+ - sap_hana_scaleout_active_coordinator is defined
+ - not inventory_hostname in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan].keys()
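+# Worked example (hypothetical hostname): 'h01hana3' is reduced by the regex to the
+# host specification key 'h01hana', i.e. the trailing node number is stripped.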
+
+
+# Status will change from Building > Warning (VM = Active, Health = Warning) > Active. The Ansible Task will continue once the Active status has been reached.
+- name: Provision IBM Power Virtual Server instance on IBM Cloud
+ register: register_provision_host_single
+ ibm.cloudcollection.ibm_pi_instance:
+ pi_cloud_instance_id: "{{ register_ibmcloud_power_iaas_workspace_service_instance.resource.guid }}" # must be GUID, not CRN
+
+ pi_instance_name: "{{ inventory_hostname }}"
+ pi_image_id: "{{ register_provisioned_os_image.resource.id if register_provisioned_os_image.rc == 0 else register_existing_os_image.resource.id }}"
+
+ pi_sys_type: e980
+ pi_sap_profile_id: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].virtual_machine_profile }}"
+ pi_key_pair_name: "{{ sap_vm_provision_ibmcloud_powervs_key_pair_name_ssh_host_public_key }}"
+
+ pi_network:
+ - network_id: "{{ register_ibmcloud_pi_subnet.resource.id }}"
+
+ pi_storage_type: tier1
+ #pi_volume_ids: []
+
+ pi_pin_policy: none
+ pi_health_status: OK
+
+# Use check to avoid idempotency issues with legacy ibm.cloudcollection Ansible Collection (until ibm.cloud Ansible Collection is out of beta)
+- name: Check IBM Power Virtual Server instance on IBM Cloud
+ register: register_provisioned_host_single
+ ibm.cloudcollection.ibm_pi_instance_info:
+ pi_cloud_instance_id: "{{ register_ibmcloud_power_iaas_workspace_service_instance.resource.guid }}" # must be GUID, not CRN
+ pi_instance_name: "{{ inventory_hostname }}"
+
+
+# Create flat list with names for each volume to be created.
+- name: Set fact for target device map
+ ansible.builtin.set_fact:
+ storage_disks_map: |
+ {% set disks_map = [] -%}
+ {% for storage_item in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].storage_definition -%}
+ {% for idx in range(0, storage_item.disk_count | default(1)) -%}
+ {% if (storage_item.filesystem_type is defined) -%}
+ {% if ('swap' in storage_item.filesystem_type and storage_item.swap_path is not defined)
+ or ('swap' not in storage_item.filesystem_type and storage_item.nfs_path is not defined) -%}
+ {% set vol = disks_map.extend([
+ {
+ 'definition_key': storage_item.name,
+ 'name': storage_item.name + idx|string,
+ 'size': storage_item.disk_size | default(0),
+ 'type': storage_item.disk_type | default('general-purpose')
+ }
+ ]) %}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- endfor %}
+ {{ disks_map }}
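+# Illustrative result, assuming one storage_definition entry of
+# {name: 'hana-data', disk_count: 2, disk_size: 384, disk_type: 'tier1', filesystem_type: 'xfs'}:
+# [{'definition_key': 'hana-data', 'name': 'hana-data0', 'size': 384, 'type': 'tier1'},
+#  {'definition_key': 'hana-data', 'name': 'hana-data1', 'size': 384, 'type': 'tier1'}]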
+
+
+- name: Provision IBM Power Infrastructure Block Storage volumes for IBM Power VS instance filesystems
+ register: register_provisioned_volumes
+ ibm.cloudcollection.ibm_pi_volume:
+ pi_cloud_instance_id: "{{ register_ibmcloud_power_iaas_workspace_service_instance.resource.guid }}" # must be GUID, not CRN
+ pi_volume_name: "{{ inventory_hostname + '-vol-' + vol_item.name | replace('_', '-')}}"
+ pi_volume_type: "{{ vol_item.type }}"
+ pi_volume_size: "{{ vol_item.size }}"
+ pi_volume_shareable: false
+ pi_replication_enabled: false
+ #delete_on_termination: true
+ loop: "{{ storage_disks_map }}"
+ loop_control:
+ loop_var: vol_item
+ index_var: vol_item_index
+ label: "{{ vol_item.definition_key }}: {{ vol_item.name }} (size: {{ vol_item.size }})"
+ when:
+ - vol_item.size > 0
+ failed_when:
+ - not register_provisioned_volumes.rc == 0
+ - not 'already exists' in register_provisioned_volumes.stderr
+
+# Use check to avoid idempotency issues with legacy ibm.cloudcollection Ansible Collection (until ibm.cloud Ansible Collection is out of beta)
+- name: Check status of IBM Power Infrastructure Block Storage volumes
+ register: register_volumes
+ ibm.cloudcollection.ibm_pi_volume_info:
+ pi_cloud_instance_id: "{{ register_ibmcloud_power_iaas_workspace_service_instance.resource.guid }}" # must be GUID, not CRN
+ pi_volume_name: "{{ inventory_hostname + '-vol-' + vol_item.name | replace('_', '-')}}"
+ loop: "{{ storage_disks_map }}"
+ loop_control:
+ loop_var: vol_item
+ index_var: vol_item_index
+ label: "{{ inventory_hostname + '-vol-' + vol_item.name | replace('_', '-')}}"
+ when:
+ - vol_item.size > 0
+ retries: 5
+  until: register_volumes.rc == 0 and (register_volumes.resource is defined and register_volumes.resource.state in ["available", "in-use"])
+ delay: 20
+
+- name: Attach IBM Power Infrastructure Block Storage volumes as filesystem for IBM Power VS instance
+ register: register_attached_volumes
+ ibm.cloudcollection.ibm_pi_volume_attach:
+ pi_cloud_instance_id: "{{ register_ibmcloud_power_iaas_workspace_service_instance.resource.guid }}" # must be GUID, not CRN
+ pi_volume_id: "{{ vol_item.resource.id }}"
+ pi_instance_id: "{{ register_provisioned_host_single.resource.id }}"
+ loop: "{{ register_volumes.results }}"
+ loop_control:
+ loop_var: vol_item
+ index_var: vol_item_index
+ label: "{{ vol_item.resource.pi_volume_name }}"
+ failed_when:
+ - not register_attached_volumes.rc == 0
+ - not 'volume cannot be attached in the current state' in register_attached_volumes.stderr # when already attached message
+ retries: 1
+ until: register_attached_volumes is success
+ delay: 10
+
+- name: Read IBM Power Virtual Server information
+ register: instance_info
+ ibm.cloudcollection.ibm_pi_instance_info:
+ pi_cloud_instance_id: "{{ register_ibmcloud_power_iaas_workspace_service_instance.resource.guid }}" # must be GUID, not CRN
+ pi_instance_name: "{{ register_provisioned_host_single.resource.pi_instance_name }}"
+
+- name: Add host facts
+ ansible.builtin.set_fact:
+ volume_provisioning: "{{ register_volumes }}"
+ instance_info: "{{ instance_info }}"
+ delegate_to: "{{ inventory_hostname }}"
+ delegate_facts: true
+
+
+- name: Create fact for delegate host IP
+ ansible.builtin.set_fact:
+ provisioned_private_ip: "{{ register_provisioned_host_single.resource.addresses[0].ip }}"
+
+
+- name: Copy facts to delegate host
+ delegate_to: "{{ provisioned_private_ip }}"
+ delegate_facts: true
+ ansible.builtin.set_fact:
+ delegate_sap_vm_provision_bastion_user: "{{ sap_vm_provision_bastion_user }}"
+ delegate_sap_vm_provision_bastion_public_ip: "{{ sap_vm_provision_bastion_public_ip }}"
+ delegate_sap_vm_provision_bastion_ssh_port: "{{ sap_vm_provision_bastion_ssh_port }}"
+ delegate_sap_vm_provision_ssh_bastion_private_key_file_path: "{{ sap_vm_provision_ssh_bastion_private_key_file_path }}"
+ delegate_sap_vm_provision_ssh_host_private_key_file_path: "{{ sap_vm_provision_ssh_host_private_key_file_path }}"
+ delegate_private_ip: "{{ register_provisioned_host_single.resource.addresses[0].ip }}"
+ delegate_hostname: "{{ inventory_hostname }}"
+ delegate_sap_vm_provision_dns_root_domain_name: "{{ sap_vm_provision_dns_root_domain }}"
+
+
+### begin block, parameters will be applied to each task within the block
+- name: Allow login from root OS User
+ remote_user: root
+ become: true
+ become_user: root
+ delegate_to: "{{ provisioned_private_ip }}"
+ delegate_facts: true
+ vars:
+ ansible_ssh_private_key_file: "{{ delegate_sap_vm_provision_ssh_host_private_key_file_path }}"
+ ansible_ssh_common_args: -o ConnectTimeout=180 -o ControlMaster=auto -o ControlPersist=3600s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no -o ProxyCommand='ssh -W %h:%p {{ delegate_sap_vm_provision_bastion_user }}@{{ delegate_sap_vm_provision_bastion_public_ip }} -p {{ delegate_sap_vm_provision_bastion_ssh_port }} -i {{ delegate_sap_vm_provision_ssh_bastion_private_key_file_path }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
+ block:
+
+ - name: Create .ssh directory for root user
+ ansible.builtin.file:
+ path: /root/.ssh
+ state: directory
+      mode: '0700'
+
+ - name: Create root authorized_keys file and entries
+ ansible.builtin.copy:
+ dest: /root/.ssh/authorized_keys
+ mode: '0600'
+ content: |
+ {{ register_ibmcloud_pi_ssh_public_key.resource.ssh_key }}
+
+ - name: Permit root login
+ ansible.builtin.replace:
+ path: /etc/ssh/sshd_config
+ regexp: '(^PermitRootLogin no)'
+ replace: 'PermitRootLogin yes'
+ register: sshd_config
+
+ - name: Reload sshd service
+ ansible.builtin.service:
+ name: sshd
+ state: reloaded
+ when:
+ - sshd_config.changed
+
+### end of block
+
+
+- name: Append loop value to register
+ ansible.builtin.set_fact:
+ register_provisioned_host_single: "{{ register_provisioned_host_single | combine( { 'host_node' : inventory_hostname } , { 'sap_host_type' : lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_host_type } , { 'sap_system_type' : (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_system_type | default('')) } ) }}"
+
+- name: Append output to merged register
+ ansible.builtin.set_fact:
+ register_provisioned_host_all: "{{ register_provisioned_host_all + [register_provisioned_host_single] }}"
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_powervs/post_deployment_execute.yml b/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_powervs/post_deployment_execute.yml
new file mode 100644
index 0000000..19c7341
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_powervs/post_deployment_execute.yml
@@ -0,0 +1,5 @@
+---
+
+- name: Post Deployment notification
+ ansible.builtin.debug:
+ msg: "There are no Post Deployment tasks for SAP on this Infrastructure Platform"
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/execute_main.yml
new file mode 100644
index 0000000..c947801
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/execute_main.yml
@@ -0,0 +1,176 @@
+---
+
+- name: Ansible Task block for looped provisioning of IBM Cloud Virtual Servers
+ environment:
+ IC_API_KEY: "{{ sap_vm_provision_ibmcloud_api_key }}"
+ IC_REGION: "{{ sap_vm_provision_ibmcloud_region }}"
+ block:
+
+ - name: Identify Resource Group info
+ register: register_ibmcloud_resource_group
+ ibm.cloudcollection.ibm_resource_group_info:
+ name: "{{ sap_vm_provision_ibmcloud_resource_group_name }}"
+
+ - name: Identify pre-loaded SSH Public Key info
+ register: register_ibmcloud_ssh_public_key
+ ibm.cloudcollection.ibm_is_ssh_key_info:
+ name: "{{ sap_vm_provision_ibmcloud_key_pair_name_ssh_host_public_key }}"
+
+ - name: Identify VPC Subnet info
+ register: register_ibmcloud_vpc_subnet
+ ibm.cloudcollection.ibm_is_subnet_info:
+ name: "{{ sap_vm_provision_ibmcloud_vpc_subnet_name }}"
+
+ - name: Identify VPC Security Group info
+ register: register_ibmcloud_vpc_sg
+ ibm.cloudcollection.ibm_is_security_group_info:
+ name: "{{ item }}"
+ loop: "{{ sap_vm_provision_ibmcloud_vpc_sg_names | split(',') }}"
+
+ - name: Identify Private DNS instance
+ register: register_ibmcloud_pdns_service_instance
+ ibm.cloudcollection.ibm_resource_instance_info:
+ resource_group_id: "{{ register_ibmcloud_resource_group.resource.id }}"
+ location: global
+ service: dns-svcs
+ name: "{{ sap_vm_provision_ibmcloud_private_dns_instance_name }}"
+
+ - name: Identify Private DNS Zone info
+ register: register_ibmcloud_pdns
+ ibm.cloudcollection.ibm_dns_zones_info:
+ instance_id: "{{ register_ibmcloud_pdns_service_instance.resource.guid }}"
+
+ - name: Identify OS Image list
+ register: register_ibmcloud_os_image_list
+ ibm.cloudcollection.ibm_is_images_info:
+ status: available
+
+ - name: Set fact to hold loop variables from include_tasks
+ ansible.builtin.set_fact:
+ register_provisioned_host_all: []
+
+ - name: Provision hosts to IBM Cloud
+ register: register_provisioned_hosts
+ ansible.builtin.include_tasks:
+ file: "{{ 'platform_' + sap_vm_provision_iac_type }}/{{ sap_vm_provision_iac_platform }}/execute_provision.yml"
+ apply:
+ environment:
+ IC_API_KEY: "{{ sap_vm_provision_ibmcloud_api_key }}"
+ IC_REGION: "{{ sap_vm_provision_ibmcloud_region }}"
+
+ - name: Add hosts provisioned to the Ansible Inventory
+ register: register_add_hosts
+ ansible.builtin.add_host:
+ name: "{{ add_item[0].host_node }}"
+      groups: "{{ add_item[0].sap_system_type + '_' if (add_item[0].sap_system_type != '') else '' }}{{ add_item[0].sap_host_type }}"
+ ansible_host: "{{ add_item[0].resource.primary_network_interface[0].primary_ipv4_address }}"
+ ansible_user: "root"
+ ansible_ssh_private_key_file: "{{ sap_vm_provision_ssh_host_private_key_file_path }}"
+ ansible_ssh_common_args: -o ConnectTimeout=180 -o ControlMaster=auto -o ControlPersist=3600s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no -o ProxyCommand='ssh -W %h:%p {{ sap_vm_provision_bastion_user }}@{{ sap_vm_provision_bastion_public_ip }} -p {{ sap_vm_provision_bastion_ssh_port }} -i {{ sap_vm_provision_ssh_bastion_private_key_file_path }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
+ loop: "{{ ansible_play_hosts | map('extract', hostvars, 'register_provisioned_host_all') }}"
+ loop_control:
+ label: "{{ add_item[0].host_node }}"
+ loop_var: add_item
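+  # Note on the loop above (descriptive only): each item extracted from hostvars is that
+  # host's register_provisioned_host_all list, which in this flow holds a single entry per
+  # provisioned host, hence the add_item[0] indexing.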
+
+
+# Cannot override any variables from extravars input, see https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#understanding-variable-precedence
+# Ensure no default value exists for any prompted variable before execution of Ansible Playbook
+
+ - name: Set fact to hold all inventory hosts in all groups
+ ansible.builtin.set_fact:
+ groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] ] | flatten | select() }}"
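+  # For illustration (assumed group contents): with groups 'hana_primary': ['h01'] and
+  # 'nwas_ascs': ['a01'], the expression above flattens to ['h01', 'a01']; undefined groups
+  # default to empty lists and select() drops any empty results.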
+
+ - name: Set Ansible Vars
+ register: register_set_ansible_vars
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars.yml
+
+ - name: IBM Cloud Private DNS Record for hosts
+ register: register_ibmcloud_pdns_record
+ ibm.cloudcollection.ibm_dns_resource_record:
+ instance_id: "{{ register_ibmcloud_pdns_service_instance.resource.guid }}"
+ zone_id: "{{ (register_ibmcloud_pdns.resource.dns_zones | selectattr('name', '==', sap_vm_provision_dns_root_domain) | first).zone_id }}"
+ name: "{{ inventory_hostname }}.{{ hostvars[inventory_hostname].sap_vm_provision_dns_root_domain }}" # Host FQDN
+ rdata: "{{ hostvars[inventory_hostname].ansible_host }}" # IP Address
+ type: A
+ ttl: 7200
+ failed_when: not register_ibmcloud_pdns_record.rc == 0 and not 'The record already exists' in register_ibmcloud_pdns_record.stderr
+
+ # - ansible.builtin.debug:
+ # var: register_add_hosts.results
+
+- name: Ansible Task block to execute on target inventory hosts
+ delegate_to: "{{ inventory_hostname }}"
+ block:
+
+ # Required to collect the remote host's facts for further processing
+ # in the following steps
+ - name: Gather host facts
+ ansible.builtin.setup:
+
+  # Must be set to the short hostname,
+  # so that the commands 'hostname' and 'hostname -s' both return the short hostname only;
+  # otherwise SAP SWPM may fail with a doubled FQDN such as name.domain.com.domain.com
+ - name: Change system hostname (must be set to short name and not FQDN, as required by SAP)
+ ansible.builtin.hostname:
+ name: "{{ inventory_hostname_short }}"
+
+ - name: Set /etc/hosts
+ register: register_etc_hosts_file
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts.yml
+
+ - name: Set /etc/hosts for Scale-Out
+ register: register_etc_hosts_file_scaleout
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_scaleout.yml
+ when:
+ - (groups["hana_primary"] is defined and (groups["hana_primary"] | length>0)) and (sap_hana_scaleout_active_coordinator is defined or sap_hana_scaleout_active_worker is defined or sap_hana_scaleout_standby is defined)
+
+ - name: Set vars for sap_storage_setup Ansible Role
+ register: register_ansible_vars_storage
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars_storage.yml
+
+
+# - name: Ansible Task block to execute on target inventory hosts - High Availability
+# delegate_to: "{{ inventory_hostname }}"
+# when:
+# - sap_ha_pacemaker_cluster_ibmcloud_region is defined
+# - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+# block:
+
+
+- name: Ansible Task block for looped provisioning of High Availability resources for IBM Cloud VS instances
+ delegate_to: localhost
+ run_once: true
+ environment:
+ IC_API_KEY: "{{ sap_vm_provision_ibmcloud_api_key }}" # For legacy Ansible Collection
+ IBMCLOUD_API_KEY: "{{ sap_vm_provision_ibmcloud_api_key }}" # For IBM Cloud CLI quiet login
+ IC_REGION: "{{ sap_vm_provision_ibmcloud_region }}"
+ when:
+ - sap_ha_pacemaker_cluster_ibmcloud_region is defined
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+ block:
+
+ - name: Provision High Availability resources for IBM Cloud hosts
+ ansible.builtin.include_tasks:
+ file: "{{ 'platform_' + sap_vm_provision_iac_type }}/{{ sap_vm_provision_iac_platform }}/execute_setup_ha.yml"
+ apply:
+ environment:
+ IC_API_KEY: "{{ sap_vm_provision_ibmcloud_api_key }}" # For legacy Ansible Collection
+ IBMCLOUD_API_KEY: "{{ sap_vm_provision_ibmcloud_api_key }}" # For IBM Cloud CLI quiet login
+ IC_REGION: "{{ sap_vm_provision_ibmcloud_region }}"
+
+
+- name: Ansible Task block to execute on target inventory hosts for HA
+ delegate_to: "{{ inventory_hostname }}"
+ when:
+ - sap_ha_pacemaker_cluster_ibmcloud_region is defined
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+ block:
+
+ - name: Set /etc/hosts for HA
+ register: register_etc_hosts_file_ha
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_ha.yml
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/execute_provision.yml b/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/execute_provision.yml
new file mode 100644
index 0000000..3f61147
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/execute_provision.yml
@@ -0,0 +1,179 @@
+---
+# The tasks in this file are executed in a loop over the defined hosts
+
+# When SAP HANA Scale-Out is used and the host name is not in the original specifications, strip the trailing node number suffix from the host name
+- name: Set fact when performing SAP HANA Scale-Out
+ ansible.builtin.set_fact:
+ scaleout_origin_host_spec: "{{ inventory_hostname | regex_replace('^(.+?)\\d*$', '\\1') }}"
+ when:
+ - sap_hana_scaleout_active_coordinator is defined
+ - not inventory_hostname in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan].keys()
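+# For illustration (assumed host naming, not part of the role): with a Scale-Out plan key
+# 'hana-scaleout', provisioned hosts 'hana-scaleout1', 'hana-scaleout2', ... map back to the
+# plan key via the regex_replace above, which strips trailing digits:
+#   "{{ 'hana-scaleout2' | regex_replace('^(.+?)\\d*$', '\\1') }}"  ->  'hana-scaleout'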
+
+- name: Provision IBM Cloud Virtual Server instance
+ register: register_provisioned_host_single
+ ibm.cloudcollection.ibm_is_instance:
+ state: available
+ name: "{{ inventory_hostname }}"
+ image: "{{ (register_ibmcloud_os_image_list.resource.images | select('search', lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_os_image_dictionary')[sap_vm_provision_ibmcloud_vs_host_os_image]) | sort(reverse=True,case_sensitive=False,attribute='name') | first).id }}"
+ profile: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].virtual_machine_profile }}"
+ keys:
+ - "{{ register_ibmcloud_ssh_public_key.resource.id }}"
+
+ resource_group: "{{ register_ibmcloud_resource_group.resource.id }}"
+ zone: "{{ sap_vm_provision_ibmcloud_availability_zone }}"
+ vpc: "{{ register_ibmcloud_vpc_subnet.resource.vpc }}"
+
+ # The Subnet assigned to the primary Virtual Network Interface (vNIC) cannot be changed
+ # The Name and Security Group assigned to the Primary Network Interface (vNIC) are editable
+ primary_network_interface:
+ - name: "{{ inventory_hostname }}-vnic0"
+ subnet: "{{ register_ibmcloud_vpc_subnet.resource.id }}"
+        allow_ip_spoofing: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].disable_ip_anti_spoofing }}" # When 'disable_ip_anti_spoofing' is true, 'allow_ip_spoofing' must also be true
+ security_groups: "{{ register_ibmcloud_vpc_sg.results | map(attribute='resource.id') }}"
+ #network_interfaces:
+
+ auto_delete_volume: true
+ boot_volume:
+ - name: "{{ inventory_hostname }}-boot-0"
+
+ metadata_service:
+ - enabled: true
+ protocol: https
+ response_hop_limit: 5
+
+
+# Create flat list with names for each volume to be created.
+- name: Set fact for target device map
+ ansible.builtin.set_fact:
+ storage_disks_map: |
+ {% set disks_map = [] -%}
+ {% for storage_item in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].storage_definition -%}
+ {% for idx in range(0, storage_item.disk_count | default(1)) -%}
+ {% if (storage_item.filesystem_type is defined) -%}
+ {% if ('swap' in storage_item.filesystem_type and storage_item.swap_path is not defined)
+ or ('swap' not in storage_item.filesystem_type and storage_item.nfs_path is not defined) -%}
+ {% set vol = disks_map.extend([
+ {
+ 'definition_key': storage_item.name,
+ 'name': storage_item.name + idx|string,
+ 'size': storage_item.disk_size | default(0),
+ 'type': storage_item.disk_type | default('general-purpose')
+ }
+ ]) %}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- endfor %}
+ {{ disks_map }}
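+# A minimal sketch of the mapping above, using an assumed storage_definition entry
+# (illustrative values only):
+#   - { name: 'hana_data', disk_count: 2, disk_size: 384, disk_type: '10iops-tier', filesystem_type: 'xfs' }
+# yields two entries in storage_disks_map:
+#   [ { 'definition_key': 'hana_data', 'name': 'hana_data0', 'size': 384, 'type': '10iops-tier' },
+#     { 'definition_key': 'hana_data', 'name': 'hana_data1', 'size': 384, 'type': '10iops-tier' } ]
+# Entries that define swap_path or nfs_path are intentionally skipped by the conditions above.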
+
+- name: Provision IBM Cloud Block Storage volumes for IBM Cloud VS instance filesystems
+ ibm.cloudcollection.ibm_is_volume:
+ resource_group: "{{ register_ibmcloud_resource_group.resource.id }}"
+ zone: "{{ sap_vm_provision_ibmcloud_availability_zone }}"
+    name: "{{ inventory_hostname + '-vol-' + vol_item.name | replace('_', '-') }}"
+ profile: "{{ vol_item.type }}"
+ capacity: "{{ vol_item.size }}"
+ loop: "{{ storage_disks_map }}"
+ loop_control:
+ loop_var: vol_item
+ index_var: vol_item_index
+ label: "{{ vol_item.definition_key }}: {{ vol_item.name }} (size: {{ vol_item.size }})"
+ when:
+ - vol_item.size > 0
+ register: volume_provisioning
+
+- name: Attach IBM Cloud Block Storage volumes as filesystem for IBM Cloud VS instance
+ ibm.cloudcollection.ibm_is_instance_volume_attachment:
+ name: "{{ vol_item.resource.name }}-attach"
+ volume: "{{ vol_item.resource.id }}"
+ instance: "{{ register_provisioned_host_single.resource.id }}"
+ delete_volume_on_attachment_delete: true
+ delete_volume_on_instance_delete: true
+ loop: "{{ volume_provisioning.results }}"
+ loop_control:
+ loop_var: vol_item
+ index_var: vol_item_index
+ label: "{{ vol_item.resource.name }}"
+
+
+- name: Read IBM Cloud VS information
+ ibm.cloudcollection.ibm_is_instance:
+ name: "{{ register_provisioned_host_single.resource.name }}"
+ register: instance_info
+
+- name: Add host facts
+ ansible.builtin.set_fact:
+ volume_provisioning: "{{ volume_provisioning }}"
+ instance_info: "{{ instance_info }}"
+ delegate_to: "{{ inventory_hostname }}"
+ delegate_facts: true
+
+
+- name: Create fact for delegate host IP
+ ansible.builtin.set_fact:
+ provisioned_private_ip: "{{ register_provisioned_host_single.resource.primary_network_interface[0].primary_ipv4_address }}"
+
+
+- name: Copy facts to delegate host
+ delegate_to: "{{ provisioned_private_ip }}"
+ delegate_facts: true
+ ansible.builtin.set_fact:
+ delegate_sap_vm_provision_bastion_user: "{{ sap_vm_provision_bastion_user }}"
+ delegate_sap_vm_provision_bastion_public_ip: "{{ sap_vm_provision_bastion_public_ip }}"
+ delegate_sap_vm_provision_bastion_ssh_port: "{{ sap_vm_provision_bastion_ssh_port }}"
+ delegate_sap_vm_provision_ssh_bastion_private_key_file_path: "{{ sap_vm_provision_ssh_bastion_private_key_file_path }}"
+ delegate_sap_vm_provision_ssh_host_private_key_file_path: "{{ sap_vm_provision_ssh_host_private_key_file_path }}"
+ delegate_private_ip: "{{ register_provisioned_host_single.resource.primary_network_interface[0].primary_ipv4_address }}"
+ delegate_hostname: "{{ inventory_hostname }}"
+ delegate_sap_vm_provision_dns_root_domain_name: "{{ sap_vm_provision_dns_root_domain }}"
+
+
+### begin block, parameters will be applied to each task within the block
+- name: Allow login from root OS User
+ remote_user: root
+ become: true
+ become_user: root
+ delegate_to: "{{ provisioned_private_ip }}"
+ delegate_facts: true
+ vars:
+ ansible_ssh_private_key_file: "{{ delegate_sap_vm_provision_ssh_host_private_key_file_path }}"
+ ansible_ssh_common_args: -o ConnectTimeout=180 -o ControlMaster=auto -o ControlPersist=3600s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no -o ProxyCommand='ssh -W %h:%p {{ delegate_sap_vm_provision_bastion_user }}@{{ delegate_sap_vm_provision_bastion_public_ip }} -p {{ delegate_sap_vm_provision_bastion_ssh_port }} -i {{ delegate_sap_vm_provision_ssh_bastion_private_key_file_path }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
+ block:
+
+ - name: Create .ssh directory for root user
+ ansible.builtin.file:
+ path: /root/.ssh
+ state: directory
+ mode: '0744'
+
+ - name: Create root authorized_keys file and entries
+ ansible.builtin.copy:
+ dest: /root/.ssh/authorized_keys
+ mode: '0600'
+ content: |
+ {{ register_ibmcloud_ssh_public_key.resource.public_key }}
+
+ - name: Permit root login
+ ansible.builtin.replace:
+ path: /etc/ssh/sshd_config
+ regexp: '(^PermitRootLogin no)'
+ replace: 'PermitRootLogin yes'
+ register: sshd_config
+
+ - name: Reload sshd service
+ ansible.builtin.service:
+ name: sshd
+ state: reloaded
+ when:
+ - sshd_config.changed
+
+### end of block
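+# A possible follow-up check (illustrative sketch only, not part of this role): confirm the
+# root login path through the bastion before continuing, e.g.
+# - name: Wait for SSH connection as root via bastion
+#   remote_user: root
+#   delegate_to: "{{ provisioned_private_ip }}"
+#   ansible.builtin.wait_for_connection:
+#     timeout: 300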
+
+
+- name: Append loop value to register
+ ansible.builtin.set_fact:
+ register_provisioned_host_single: "{{ register_provisioned_host_single | combine( { 'host_node' : inventory_hostname } , { 'sap_host_type' : lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_host_type } , { 'sap_system_type' : (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_system_type | default('')) } ) }}"
+
+- name: Append output to merged register
+ ansible.builtin.set_fact:
+ register_provisioned_host_all: "{{ register_provisioned_host_all + [register_provisioned_host_single] }}"
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/execute_setup_ha.yml b/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/execute_setup_ha.yml
new file mode 100644
index 0000000..bc0efc1
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/execute_setup_ha.yml
@@ -0,0 +1,1027 @@
+---
+
+- name: Create IBM Cloud IAM Authorization Policy for IBM Cloud Load Balancer to communicate with IBM Cloud Private DNS
+ register: ibmcloud_iam_auth_policy
+ ibm.cloudcollection.ibm_iam_authorization_policy:
+ roles: Manager
+ source_resource_group_id: "{{ register_ibmcloud_resource_group.resource.id }}"
+ target_resource_group_id: "{{ register_ibmcloud_resource_group.resource.id }}"
+ source_service_name: is
+ source_resource_type: load-balancer
+ target_service_name: dns-svcs
+ failed_when: not ibmcloud_iam_auth_policy.rc == 0 and not 'access policy with identical attributes already exists' in ibmcloud_iam_auth_policy.stderr
+
+
+# The IBM Cloud Load Balancer is provisioned before Linux Pacemaker and requires a temporary Health Check Probe port with an active OS service listening.
+# During initial installation, the Health Check probe will use Port 55550/55551/55552 with netcat listening, and will be switched to the correct port number after the SAP software and Linux Pacemaker installations.
+#
+# This is because an IBM Cloud Load Balancer Backend Pool Health Check probe is executed against all Backend Pool Members (Server instances),
+# and if a host is not listening on the port the probe will fail, which marks the resource as unhealthy;
+# traffic will not be routed to that host, and if all hosts fail then the IBM Cloud Load Balancer Frontend IP will also fail, causing SAP software failures.
+#
+# NOTE - use of weighted round-robin is to mitigate any potential risk of misconfiguration (both primary and secondary nodes showing as active), by using the logic 'for every 100 connections send 1 connection to secondary' when both hosts are active.
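+#
+# A minimal sketch (assuming nmap-ncat is available on the hosts; not part of this task file)
+# of the temporary probe listener described above, run on each Backend Pool Member until
+# Linux Pacemaker owns the real port:
+# - name: Start temporary netcat listener for Load Balancer Health Check probe
+#   ansible.builtin.shell: nohup nc -l -k 55550 </dev/null >/dev/null 2>&1 &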
+
+# Private Application Load Balancer (Layer 7), allows for expansion across multiple Availability Zones within a Region. Permits TCP and HTTP/S, does not permit UDP.
+# Private Network Load Balancer (Layer 4), restricts HA within single Availability Zone. Permits TCP and UDP, does not permit HTTP/S.
+# Use async with poll '0' to create all IBM Cloud Load Balancers in parallel
+- name: Create IBM Cloud Load Balancer (Private ALB) for each Virtual Hostname / Virtual IP required
+ register: ibmcloud_lb_provision_parallel
+ ibm.cloudcollection.ibm_is_lb:
+ resource_group: "{{ register_ibmcloud_resource_group.resource.id }}"
+ name: "{{ item }}"
+ type: private
+ subnets: ["{{ register_ibmcloud_vpc_subnet.resource.id }}"]
+ security_groups: "{{ register_ibmcloud_vpc_sg.results | map(attribute='resource.id') }}"
+ #access_tags:
+ logging: true # For ALB L7, not NLB L4
+ #profile: network-fixed / dynamic # For NLB L4, not ALB L7
+ #route_mode: false # For NLB L4, not ALB L7
+ # dns: # Unsupported by legacy Ansible Collection, use IBM Cloud CLI as workaround
+ # instance_crn: "{{ register_ibmcloud_pdns_service_instance.resource.resource_crn }}"
+ # zone_id: "{{ (register_ibmcloud_pdns.resource.dns_zones | selectattr('name', '==', sap_vm_provision_dns_root_domain) | first).zone_id }}"
+ loop:
+ - "{{ 'lb-sap-ha-hana' if (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0)) else '' }}"
+ - "{{ 'lb-sap-ha-anydb' if (groups['anydb_secondary'] is defined and (groups['anydb_secondary'] | length>0)) else '' }}"
+ - "{{ 'lb-sap-ha-nwas-ascs' if (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) else '' }}"
+ - "{{ 'lb-sap-ha-nwas-ers' if (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) else '' }}"
+ loop_control:
+ label: "Waiting for {{ item }}"
+ when: not item == ''
+ async: 1200 # seconds maximum execution
+ poll: 0 # parallel
+
+- name: Wait for IBM Cloud Load Balancer provisioning
+ register: async_status_ibmcloud_lb_provision_parallel
+ ansible.builtin.async_status:
+ jid: "{{ item.ansible_job_id }}"
+ retries: 40
+ delay: 30 # seconds
+ until: async_status_ibmcloud_lb_provision_parallel.finished
+ loop: "{{ ibmcloud_lb_provision_parallel.results | selectattr('ansible_job_id', 'defined') }}"
+ # failed_when: not async_status_ibmcloud_lb_provision_parallel.rc == 0
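+# Descriptive note: the async/poll-0 pattern above dispatches all ibm_is_lb creations in
+# parallel and only the async_status loop blocks, waiting up to 40 retries x 30s (20 minutes)
+# per Load Balancer job.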
+
+# Workaround for missing Ansible Module functionality in the legacy Ansible Collection
+- name: IBM Cloud CLI append DNS to Load Balancer
+ register: ibmcloud_lb_update_dns
+ ansible.builtin.shell: |
+ ibmcloud config --quiet --check-version false && ibmcloud login -g {{ sap_vm_provision_ibmcloud_resource_group_name }} -r {{ sap_vm_provision_ibmcloud_region }} --quiet
+ ibmcloud plugin install infrastructure-service -f --quiet
+ ibmcloud is load-balancer-update "{{ item }}" --dns-instance-crn "{{ register_ibmcloud_pdns_service_instance.resource.resource_crn }}" --dns-zone-id "{{ (register_ibmcloud_pdns.resource.dns_zones | selectattr('name', '==', sap_vm_provision_dns_root_domain) | first).zone_id }}"
+ loop:
+ - "{{ 'lb-sap-ha-hana' if (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0)) else '' }}"
+ - "{{ 'lb-sap-ha-anydb' if (groups['anydb_secondary'] is defined and (groups['anydb_secondary'] | length>0)) else '' }}"
+ - "{{ 'lb-sap-ha-nwas-ascs' if (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) else '' }}"
+ - "{{ 'lb-sap-ha-nwas-ers' if (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0)) else '' }}"
+ when: not item == ''
+ failed_when: not ibmcloud_lb_update_dns.rc == 0 and not 'nothing to update the load balancer with' in ibmcloud_lb_update_dns.stderr
+
+- name: Identify IBM Cloud Virtual Servers info
+ register: ibmcloud_vs_all_info
+ ibm.cloudcollection.ibm_is_instances_info:
+ vpc: "{{ register_ibmcloud_vpc_subnet.resource.vpc }}"
+
+
+# Workaround for a bug which populates region and ibmcloud_api_key as Terraform arguments for the ibm_is_lbs_info Ansible Module in the legacy Ansible Collection
+- name: IBM Cloud CLI execution to list Load Balancer/s info
+ register: ibmcloud_lbs_all_info_shell
+ ansible.builtin.shell: |
+ ibmcloud config --quiet --check-version false && ibmcloud login -g {{ sap_vm_provision_ibmcloud_resource_group_name }} -r {{ sap_vm_provision_ibmcloud_region }} --quiet
+ #ibmcloud plugin install infrastructure-service -f --quiet
+ ibmcloud is load-balancers --quiet --output json
+
+- name: Set fact for IBM Cloud Load Balancer/s info
+ ansible.builtin.set_fact:
+ ibmcloud_lbs_all_info: "{{ ibmcloud_lbs_all_info_shell.stdout | split('Space:') | last | trim | from_json }}"
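+# For illustration (assumed CLI output shape): the login banner printed before the JSON ends
+# with a 'Space:' field, so the set_fact above keeps only the text after the last 'Space:'
+# marker and parses it with from_json. If the banner format differs, the split marker would
+# need adjusting.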
+
+
+# Create IBM Cloud Load Balancer Back-end Pools
+
+- name: Create IBM Cloud Load Balancer Back-end Pool for SAP HANA - System DB SQL
+ register: __ibmcloud_lb_pool_hana1
+ ibm.cloudcollection.ibm_is_lb_pool:
+ name: lb-sap-ha-hana-pool-sysdb-sql
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}"
+ algorithm: weighted_round_robin
+ protocol: tcp
+ health_delay: 20
+ health_retries: 2
+ health_timeout: 10
+ health_type: tcp
+ health_monitor_port: 55550
+ when: (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0))
+
+- name: Create IBM Cloud Load Balancer Back-end Pool for SAP HANA - MDC Tenant 1 SQL
+ register: __ibmcloud_lb_pool_hana2
+ ibm.cloudcollection.ibm_is_lb_pool:
+ name: lb-sap-ha-hana-pool-mdc1-sql
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}"
+ algorithm: weighted_round_robin
+ protocol: tcp
+ health_delay: 20
+ health_retries: 2
+ health_timeout: 10
+ health_type: tcp
+ health_monitor_port: 55550
+ when: (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0))
+
+- name: Create IBM Cloud Load Balancer Back-end Pool for SAP HANA - startsrv HTTP
+ register: __ibmcloud_lb_pool_hana3
+ ibm.cloudcollection.ibm_is_lb_pool:
+ name: lb-sap-ha-hana-pool-startsrv-http
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}"
+ algorithm: weighted_round_robin
+ protocol: tcp
+ health_delay: 20
+ health_retries: 2
+ health_timeout: 10
+ health_type: tcp
+ health_monitor_port: 55550
+ when: (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0))
+
+- name: Create IBM Cloud Load Balancer Back-end Pool for SAP HANA - startsrv HTTPS
+ register: __ibmcloud_lb_pool_hana4
+ ibm.cloudcollection.ibm_is_lb_pool:
+ name: lb-sap-ha-hana-pool-startsrv-https
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}"
+ algorithm: weighted_round_robin
+ protocol: tcp
+ health_delay: 20
+ health_retries: 2
+ health_timeout: 10
+ health_type: tcp
+ health_monitor_port: 55550
+ when: (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0))
+
+- name: Create IBM Cloud Load Balancer Back-end Pool for SAP AnyDB - IBM Db2 Communication Port
+ ibm.cloudcollection.ibm_is_lb_pool:
+ name: lb-sap-ha-anydb-pool-ibmdb2
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-anydb'))[0].id }}"
+ algorithm: weighted_round_robin
+ protocol: tcp
+ health_delay: 20
+ health_retries: 2
+ health_timeout: 10
+ health_type: tcp
+ health_monitor_port: 55550
+ when: (groups['anydb_secondary'] is defined and (groups['anydb_secondary'] | length>0))
+
+- name: Create IBM Cloud Load Balancer Back-end Pool for SAP NetWeaver ASCS - Dispatcher sapdp process
+ register: __ibmcloud_lb_pool_nwas_ascs1
+ ibm.cloudcollection.ibm_is_lb_pool:
+ name: lb-sap-ha-nwas-ascs-pool-dp
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}"
+ algorithm: weighted_round_robin
+ protocol: tcp
+ health_delay: 20
+ health_retries: 2
+ health_timeout: 10
+ health_type: tcp
+ health_monitor_port: 55551
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+
+- name: Create IBM Cloud Load Balancer Back-end Pool for SAP NetWeaver ASCS - Message Server sapms process
+ register: __ibmcloud_lb_pool_nwas_ascs2
+ ibm.cloudcollection.ibm_is_lb_pool:
+ name: lb-sap-ha-nwas-ascs-pool-ms
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}"
+ algorithm: weighted_round_robin
+ protocol: tcp
+ health_delay: 20
+ health_retries: 2
+ health_timeout: 10
+ health_type: tcp
+ health_monitor_port: 55551
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+
+- name: Create IBM Cloud Load Balancer Back-end Pool for SAP NetWeaver ASCS - Enqueue Server sapenq process
+ register: __ibmcloud_lb_pool_nwas_ascs3
+ ibm.cloudcollection.ibm_is_lb_pool:
+ name: lb-sap-ha-nwas-ascs-pool-enq
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}"
+ algorithm: weighted_round_robin
+ protocol: tcp
+ health_delay: 20
+ health_retries: 2
+ health_timeout: 10
+ health_type: tcp
+ health_monitor_port: 55551
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+
+- name: Create IBM Cloud Load Balancer Back-end Pool for SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTP sapctrl process
+ register: __ibmcloud_lb_pool_nwas_ascs4
+ ibm.cloudcollection.ibm_is_lb_pool:
+ name: lb-sap-ha-nwas-ascs-pool-sapctrl
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}"
+ algorithm: weighted_round_robin
+ protocol: tcp
+ health_delay: 20
+ health_retries: 2
+ health_timeout: 10
+ health_type: tcp
+ health_monitor_port: 55551
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+
+- name: Create IBM Cloud Load Balancer Back-end Pool for SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTPS (Secure) sapctrls
+ register: __ibmcloud_lb_pool_nwas_ascs5
+ ibm.cloudcollection.ibm_is_lb_pool:
+ name: lb-sap-ha-nwas-ascs-pool-sapctrls
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}"
+ algorithm: weighted_round_robin
+ protocol: tcp
+ health_delay: 20
+ health_retries: 2
+ health_timeout: 10
+ health_type: tcp
+ health_monitor_port: 55551
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+
+# - name: Create IBM Cloud Load Balancer Back-end Pool for SAP NetWeaver ERS - Dispatcher sapdp process
+# register: __ibmcloud_lb_pool_nwas_ers1
+# ibm.cloudcollection.ibm_is_lb_pool:
+# name: lb-sap-ha-nwas-ers-pool-dp
+# lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}"
+# algorithm: weighted_round_robin
+# protocol: tcp
+# health_delay: 20
+# health_retries: 2
+# health_timeout: 10
+# health_type: tcp
+# health_monitor_port: 55552
+# when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+
+# - name: Create IBM Cloud Load Balancer Back-end Pool for SAP NetWeaver ERS - Message Server sapms process
+# register: __ibmcloud_lb_pool_nwas_ers2
+# ibm.cloudcollection.ibm_is_lb_pool:
+# name: lb-sap-ha-nwas-ers-pool-ms
+# lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}"
+# algorithm: weighted_round_robin
+# protocol: tcp
+# health_delay: 20
+# health_retries: 2
+# health_timeout: 10
+# health_type: tcp
+# health_monitor_port: 55552
+# when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+
+- name: Create IBM Cloud Load Balancer Back-end Pool for SAP NetWeaver ERS - Enqueue Replication Server sapenqr process
+ register: __ibmcloud_lb_pool_nwas_ers3
+ ibm.cloudcollection.ibm_is_lb_pool:
+ name: lb-sap-ha-nwas-ers-pool-enqr
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}"
+ algorithm: weighted_round_robin
+ protocol: tcp
+ health_delay: 20
+ health_retries: 2
+ health_timeout: 10
+ health_type: tcp
+ health_monitor_port: 55552
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+
+- name: Create IBM Cloud Load Balancer Back-end Pool for SAP NetWeaver ERS - SAP Start Service (SAPControl SOAP) HTTP sapctrl process
+ register: __ibmcloud_lb_pool_nwas_ers4
+ ibm.cloudcollection.ibm_is_lb_pool:
+ name: lb-sap-ha-nwas-ers-pool-sapctrl
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}"
+ algorithm: weighted_round_robin
+ protocol: tcp
+ health_delay: 20
+ health_retries: 2
+ health_timeout: 10
+ health_type: tcp
+ health_monitor_port: 55552
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+
+- name: Create IBM Cloud Load Balancer Back-end Pool for SAP NetWeaver ERS - SAP Start Service (SAPControl SOAP) HTTPS (Secure) sapctrls
+ register: __ibmcloud_lb_pool_nwas_ers5
+ ibm.cloudcollection.ibm_is_lb_pool:
+ name: lb-sap-ha-nwas-ers-pool-sapctrls
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}"
+ algorithm: weighted_round_robin
+ protocol: tcp
+ health_delay: 20
+ health_retries: 2
+ health_timeout: 10
+ health_type: tcp
+ health_monitor_port: 55552
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+
+
+# Identify the provisioned IBM Cloud Load Balancer Back-end Pools
+
+- name: Identify IBM Cloud Load Balancer Back-end Pools
+ register: __ibmcloud_lb_pools
+ ibm.cloudcollection.ibm_is_lb_pools_info:
+ lb: "{{ item }}"
+ loop: "{{ ibmcloud_lbs_all_info | map(attribute='id') }}"
+
+
+# Append Server Members to the IBM Cloud Load Balancer Back-end Pools
+
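+# For reference (standard SAP port conventions, shown with an assumed instance number '90'):
+# the member ports below are built by string concatenation before the cast to int, e.g.
+#   ('3' + '90' + '13') | int  ->  39013  (HANA System DB SQL, 3<NN>13)
+#   ('3' + '90' + '15') | int  ->  39015  (HANA MDC Tenant 1 SQL, 3<NN>15)
+#   ('5' + '90' + '13') | int  ->  59013  (sapstartsrv HTTP, 5<NN>13)
+# Each pool id is resolved by flattening every 'resource.pools' list in __ibmcloud_lb_pools
+# and selecting on the pool name created earlier.
+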
+# Primary
+- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP HANA - System DB SQL - Member 1
+ register: __ibmcloud_lb_pool_members_hana1
+ ibm.cloudcollection.ibm_is_lb_pool_member:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}"
+ pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-hana-pool-sysdb-sql'))[0].id }}"
+ target_address: "{{ (ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}"
+ port: "{{ ('3' + sap_system_hana_db_instance_nr + '13') | int }}"
+ weight: 100
+ loop: "{{ (groups['hana_primary'] | default([]) ) }}"
+ loop_control:
+ loop_var: host_node
+ when: (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0))
+ failed_when: not __ibmcloud_lb_pool_members_hana1.rc == 0 and not 'already exists in a pool' in __ibmcloud_lb_pool_members_hana1.stderr
+
+# Secondary
+- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP HANA - System DB SQL - Member 2 (Failover/Secondary)
+ register: __ibmcloud_lb_pool_members_hana2
+ ibm.cloudcollection.ibm_is_lb_pool_member:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}"
+ pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-hana-pool-sysdb-sql'))[0].id }}"
+ target_address: "{{ (ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}"
+ port: "{{ ('3' + sap_system_hana_db_instance_nr + '13') | int }}"
+ weight: 1
+ loop: "{{ (groups['hana_secondary'] | default([]) ) }}"
+ loop_control:
+ loop_var: host_node
+ when: (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0))
+ failed_when: not __ibmcloud_lb_pool_members_hana2.rc == 0 and not 'already exists in a pool' in __ibmcloud_lb_pool_members_hana2.stderr
+
+
+# Primary
+- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP HANA - MDC Tenant 1 SQL - Member 1
+ register: __ibmcloud_lb_pool_members_hana3
+ ibm.cloudcollection.ibm_is_lb_pool_member:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}"
+ pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-hana-pool-mdc1-sql'))[0].id }}"
+ target_address: "{{ (ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}"
+ port: "{{ ('3' + sap_system_hana_db_instance_nr + '15') | int }}"
+ weight: 100
+ loop: "{{ (groups['hana_primary'] | default([]) ) }}"
+ loop_control:
+ loop_var: host_node
+ when: (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0))
+ failed_when: not __ibmcloud_lb_pool_members_hana3.rc == 0 and not 'already exists in a pool' in __ibmcloud_lb_pool_members_hana3.stderr
+
+# Secondary
+- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP HANA - MDC Tenant 1 SQL - Member 2 (Failover/Secondary)
+ register: __ibmcloud_lb_pool_members_hana4
+ ibm.cloudcollection.ibm_is_lb_pool_member:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}"
+ pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-hana-pool-mdc1-sql'))[0].id }}"
+ target_address: "{{ (ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}"
+ port: "{{ ('3' + sap_system_hana_db_instance_nr + '15') | int }}"
+ weight: 1
+ loop: "{{ (groups['hana_secondary'] | default([]) ) }}"
+ loop_control:
+ loop_var: host_node
+ when: (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0))
+ failed_when: not __ibmcloud_lb_pool_members_hana4.rc == 0 and not 'already exists in a pool' in __ibmcloud_lb_pool_members_hana4.stderr
+
+
+# Primary
+- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP HANA - startsrv HTTP - Member 1
+ register: __ibmcloud_lb_pool_members_hana5
+ ibm.cloudcollection.ibm_is_lb_pool_member:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}"
+ pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-hana-pool-startsrv-http'))[0].id }}"
+ target_address: "{{ (ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}"
+ port: "{{ ('5' + sap_system_hana_db_instance_nr + '13') | int }}"
+ weight: 100
+ loop: "{{ (groups['hana_primary'] | default([]) ) }}"
+ loop_control:
+ loop_var: host_node
+ when: (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0))
+ failed_when: not __ibmcloud_lb_pool_members_hana5.rc == 0 and not 'already exists in a pool' in __ibmcloud_lb_pool_members_hana5.stderr
+
+# Secondary
+- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP HANA - startsrv HTTP - Member 2 (Failover/Secondary)
+ register: __ibmcloud_lb_pool_members_hana6
+ ibm.cloudcollection.ibm_is_lb_pool_member:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}"
+ pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-hana-pool-startsrv-http'))[0].id }}"
+ target_address: "{{ (ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}"
+ port: "{{ ('5' + sap_system_hana_db_instance_nr + '13') | int }}"
+ weight: 1
+ loop: "{{ (groups['hana_secondary'] | default([]) ) }}"
+ loop_control:
+ loop_var: host_node
+ when: (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0))
+ failed_when: not __ibmcloud_lb_pool_members_hana6.rc == 0 and not 'already exists in a pool' in __ibmcloud_lb_pool_members_hana6.stderr
+
+
+# Primary
+- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP HANA - startsrv HTTPS - Member 1
+ register: __ibmcloud_lb_pool_members_hana7
+ ibm.cloudcollection.ibm_is_lb_pool_member:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}"
+ pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-hana-pool-startsrv-https'))[0].id }}"
+ target_address: "{{ (ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}"
+ port: "{{ ('5' + sap_system_hana_db_instance_nr + '14') | int }}"
+ weight: 100
+ loop: "{{ (groups['hana_primary'] | default([]) ) }}"
+ loop_control:
+ loop_var: host_node
+ when: (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0))
+ failed_when: not __ibmcloud_lb_pool_members_hana7.rc == 0 and not 'already exists in a pool' in __ibmcloud_lb_pool_members_hana7.stderr
+
+# Secondary
+- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP HANA - startsrv HTTPS - Member 2 (Failover/Secondary)
+ register: __ibmcloud_lb_pool_members_hana8
+ ibm.cloudcollection.ibm_is_lb_pool_member:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}"
+ pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-hana-pool-startsrv-https'))[0].id }}"
+ target_address: "{{ (ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}"
+ port: "{{ ('5' + sap_system_hana_db_instance_nr + '14') | int }}"
+ weight: 1
+ loop: "{{ (groups['hana_secondary'] | default([]) ) }}"
+ loop_control:
+ loop_var: host_node
+ when: (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0))
+ failed_when: not __ibmcloud_lb_pool_members_hana8.rc == 0 and not 'already exists in a pool' in __ibmcloud_lb_pool_members_hana8.stderr
+
+
+# Primary
+- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP AnyDB - IBM Db2 Communication Port - Member 1
+ register: __ibmcloud_lb_pool_members_anydb1
+ ibm.cloudcollection.ibm_is_lb_pool_member:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-anydb'))[0].id }}"
+ pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-anydb-pool-ibmdb2'))[0].id }}"
+ target_address: "{{ (ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}"
+ port: 5912
+ weight: 100
+ loop: "{{ (groups['anydb_primary'] | default([]) ) }}"
+ loop_control:
+ loop_var: host_node
+ when: (groups['anydb_secondary'] is defined and (groups['anydb_secondary'] | length>0))
+ failed_when: not __ibmcloud_lb_pool_members_anydb1.rc == 0 and not 'already exists in a pool' in __ibmcloud_lb_pool_members_anydb1.stderr
+
+# Secondary
+- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP AnyDB - IBM Db2 Communication Port - Member 2 (Failover/Secondary)
+ register: __ibmcloud_lb_pool_members_anydb2
+ ibm.cloudcollection.ibm_is_lb_pool_member:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-anydb'))[0].id }}"
+ pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-anydb-pool-ibmdb2'))[0].id }}"
+ target_address: "{{ (ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}"
+ port: 5912
+ weight: 1
+ loop: "{{ (groups['anydb_secondary'] | default([]) ) }}"
+ loop_control:
+ loop_var: host_node
+ when: (groups['anydb_secondary'] is defined and (groups['anydb_secondary'] | length>0))
+ failed_when: not __ibmcloud_lb_pool_members_anydb2.rc == 0 and not 'already exists in a pool' in __ibmcloud_lb_pool_members_anydb2.stderr
+
+
+# Primary
+- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ASCS - Dispatcher sapdp process - Member 1
+ register: __ibmcloud_lb_pool_members_nwas_ascs1
+ ibm.cloudcollection.ibm_is_lb_pool_member:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}"
+ pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-dp'))[0].id }}"
+ target_address: "{{ (ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}"
+ port: "{{ ('32' + sap_system_nwas_abap_ascs_instance_nr) | int }}"
+ weight: 100
+ loop: "{{ (groups['nwas_ascs'] | default([]) ) }}"
+ loop_control:
+ loop_var: host_node
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+ failed_when: not __ibmcloud_lb_pool_members_nwas_ascs1.rc == 0 and not 'already exists in a pool' in __ibmcloud_lb_pool_members_nwas_ascs1.stderr
+
+# Secondary
+- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ASCS - Dispatcher sapdp process - Member 2 (Failover/Secondary)
+ register: __ibmcloud_lb_pool_members_nwas_ascs2
+ ibm.cloudcollection.ibm_is_lb_pool_member:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}"
+ pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-dp'))[0].id }}"
+ target_address: "{{ (ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}"
+ port: "{{ ('32' + sap_system_nwas_abap_ascs_instance_nr) | int }}"
+ weight: 1
+ loop: "{{ (groups['nwas_ers'] | default([]) ) }}"
+ loop_control:
+ loop_var: host_node
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+ failed_when: not __ibmcloud_lb_pool_members_nwas_ascs2.rc == 0 and not 'already exists in a pool' in __ibmcloud_lb_pool_members_nwas_ascs2.stderr
+
+
+# Primary
+- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ASCS - Message Server sapms process - Member 1
+ register: __ibmcloud_lb_pool_members_nwas_ascs3
+ ibm.cloudcollection.ibm_is_lb_pool_member:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}"
+ pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-ms'))[0].id }}"
+ target_address: "{{ (ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}"
+ port: "{{ ('36' + sap_system_nwas_abap_ascs_instance_nr) | int }}"
+ weight: 100
+ loop: "{{ (groups['nwas_ascs'] | default([]) ) }}"
+ loop_control:
+ loop_var: host_node
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+ failed_when: not __ibmcloud_lb_pool_members_nwas_ascs3.rc == 0 and not 'already exists in a pool' in __ibmcloud_lb_pool_members_nwas_ascs3.stderr
+
+# Secondary
+- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ASCS - Message Server sapms process - Member 2 (Failover/Secondary)
+ register: __ibmcloud_lb_pool_members_nwas_ascs4
+ ibm.cloudcollection.ibm_is_lb_pool_member:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}"
+ pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-ms'))[0].id }}"
+ target_address: "{{ (ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}"
+ port: "{{ ('36' + sap_system_nwas_abap_ascs_instance_nr) | int }}"
+ weight: 1
+ loop: "{{ (groups['nwas_ers'] | default([]) ) }}"
+ loop_control:
+ loop_var: host_node
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+ failed_when: not __ibmcloud_lb_pool_members_nwas_ascs4.rc == 0 and not 'already exists in a pool' in __ibmcloud_lb_pool_members_nwas_ascs4.stderr
+
+
+# Primary
+- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ASCS - Enqueue Server sapenq process - Member 1
+ register: __ibmcloud_lb_pool_members_nwas_ascs5
+ ibm.cloudcollection.ibm_is_lb_pool_member:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}"
+ pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-enq'))[0].id }}"
+ target_address: "{{ (ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}"
+ port: "{{ ('39' + sap_system_nwas_abap_ascs_instance_nr) | int }}"
+ weight: 100
+ loop: "{{ (groups['nwas_ascs'] | default([]) ) }}"
+ loop_control:
+ loop_var: host_node
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+ failed_when: not __ibmcloud_lb_pool_members_nwas_ascs5.rc == 0 and not 'already exists in a pool' in __ibmcloud_lb_pool_members_nwas_ascs5.stderr
+
+# Secondary
+- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ASCS - Enqueue Server sapenq process - Member 2 (Failover/Secondary)
+ register: __ibmcloud_lb_pool_members_nwas_ascs6
+ ibm.cloudcollection.ibm_is_lb_pool_member:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}"
+ pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-enq'))[0].id }}"
+ target_address: "{{ (ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}"
+ port: "{{ ('39' + sap_system_nwas_abap_ascs_instance_nr) | int }}"
+ weight: 1
+ loop: "{{ (groups['nwas_ers'] | default([]) ) }}"
+ loop_control:
+ loop_var: host_node
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+ failed_when: not __ibmcloud_lb_pool_members_nwas_ascs6.rc == 0 and not 'already exists in a pool' in __ibmcloud_lb_pool_members_nwas_ascs6.stderr
+
+
+# Primary
+- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTP sapctrl process - Member 1
+ register: __ibmcloud_lb_pool_members_nwas_ascs7
+ ibm.cloudcollection.ibm_is_lb_pool_member:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}"
+ pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-sapctrl'))[0].id }}"
+ target_address: "{{ (ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}"
+ port: "{{ ('5' + sap_system_nwas_abap_ascs_instance_nr + '13') | int }}"
+ weight: 100
+ loop: "{{ (groups['nwas_ascs'] | default([]) ) }}"
+ loop_control:
+ loop_var: host_node
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+ failed_when: not __ibmcloud_lb_pool_members_nwas_ascs7.rc == 0 and not 'already exists in a pool' in __ibmcloud_lb_pool_members_nwas_ascs7.stderr
+
+# Secondary
+- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTP sapctrl process - Member 2 (Failover/Secondary)
+ register: __ibmcloud_lb_pool_members_nwas_ascs8
+ ibm.cloudcollection.ibm_is_lb_pool_member:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}"
+ pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-sapctrl'))[0].id }}"
+ target_address: "{{ (ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}"
+ port: "{{ ('5' + sap_system_nwas_abap_ascs_instance_nr + '13') | int }}"
+ weight: 1
+ loop: "{{ (groups['nwas_ers'] | default([]) ) }}"
+ loop_control:
+ loop_var: host_node
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+ failed_when: not __ibmcloud_lb_pool_members_nwas_ascs8.rc == 0 and not 'already exists in a pool' in __ibmcloud_lb_pool_members_nwas_ascs8.stderr
+
+
+# Primary
+- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTPS (Secure) sapctrls - Member 1
+ register: __ibmcloud_lb_pool_members_nwas_ascs9
+ ibm.cloudcollection.ibm_is_lb_pool_member:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}"
+ pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-sapctrls'))[0].id }}"
+ target_address: "{{ (ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}"
+ port: "{{ ('5' + sap_system_nwas_abap_ascs_instance_nr + '14') | int }}"
+ weight: 100
+ loop: "{{ (groups['nwas_ascs'] | default([]) ) }}"
+ loop_control:
+ loop_var: host_node
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+ failed_when: not __ibmcloud_lb_pool_members_nwas_ascs9.rc == 0 and not 'already exists in a pool' in __ibmcloud_lb_pool_members_nwas_ascs9.stderr
+
+# Secondary
+- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTPS (Secure) sapctrls - Member 2 (Failover/Secondary)
+ register: __ibmcloud_lb_pool_members_nwas_ascs10
+ ibm.cloudcollection.ibm_is_lb_pool_member:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}"
+ pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-sapctrls'))[0].id }}"
+ target_address: "{{ (ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}"
+ port: "{{ ('5' + sap_system_nwas_abap_ascs_instance_nr + '14') | int }}"
+ weight: 1
+ loop: "{{ (groups['nwas_ers'] | default([]) ) }}"
+ loop_control:
+ loop_var: host_node
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+ failed_when: not __ibmcloud_lb_pool_members_nwas_ascs10.rc == 0 and not 'already exists in a pool' in __ibmcloud_lb_pool_members_nwas_ascs10.stderr
+
+
+# Primary
+# - name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ERS - Dispatcher sapdp process - Member 1
+# register: __ibmcloud_lb_pool_members_nwas_ers1
+# ibm.cloudcollection.ibm_is_lb_pool_member:
+# lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}"
+# pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-dp'))[0].id }}"
+# target_address: "{{ (ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}"
+# port: "{{ ('32' + sap_system_nwas_abap_ers_instance_nr) | int }}"
+# weight: 100
+# loop: "{{ (groups['nwas_ers'] | default([]) ) }}"
+# loop_control:
+# loop_var: host_node
+# when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+# failed_when: not __ibmcloud_lb_pool_members_nwas_ers1.rc == 0 and not 'already exists in a pool' in __ibmcloud_lb_pool_members_nwas_ers1.stderr
+
+# Secondary
+# - name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ERS - Dispatcher sapdp process - Member 2 (Failover/Secondary)
+# register: __ibmcloud_lb_pool_members_nwas_ers2
+# ibm.cloudcollection.ibm_is_lb_pool_member:
+# lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}"
+# pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-dp'))[0].id }}"
+# target_address: "{{ (ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}"
+# port: "{{ ('32' + sap_system_nwas_abap_ers_instance_nr) | int }}"
+# weight: 1
+# loop: "{{ (groups['nwas_ascs'] | default([]) ) }}"
+# loop_control:
+# loop_var: host_node
+# when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+# failed_when: not __ibmcloud_lb_pool_members_nwas_ers2.rc == 0 and not 'already exists in a pool' in __ibmcloud_lb_pool_members_nwas_ers2.stderr
+
+
+# Primary
+# - name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ERS - Message Server sapms process - Member 1
+# register: __ibmcloud_lb_pool_members_nwas_ers3
+# ibm.cloudcollection.ibm_is_lb_pool_member:
+# lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}"
+# pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-ms'))[0].id }}"
+# target_address: "{{ (ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}"
+# port: "{{ ('36' + sap_system_nwas_abap_ers_instance_nr) | int }}"
+# weight: 100
+# loop: "{{ (groups['nwas_ers'] | default([]) ) }}"
+# loop_control:
+# loop_var: host_node
+# when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+# failed_when: not __ibmcloud_lb_pool_members_nwas_ers3.rc == 0 and not 'already exists in a pool' in __ibmcloud_lb_pool_members_nwas_ers3.stderr
+
+# Secondary
+# - name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ERS - Message Server sapms process - Member 2 (Failover/Secondary)
+# register: __ibmcloud_lb_pool_members_nwas_ers4
+# ibm.cloudcollection.ibm_is_lb_pool_member:
+# lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}"
+# pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-ms'))[0].id }}"
+# target_address: "{{ (ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}"
+# port: "{{ ('36' + sap_system_nwas_abap_ers_instance_nr) | int }}"
+# weight: 1
+# loop: "{{ (groups['nwas_ascs'] | default([]) ) }}"
+# loop_control:
+# loop_var: host_node
+# when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+# failed_when: not __ibmcloud_lb_pool_members_nwas_ers4.rc == 0 and not 'already exists in a pool' in __ibmcloud_lb_pool_members_nwas_ers4.stderr
+
+
+# Primary
+- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ERS - Enqueue Replication Server sapenqr process - Member 1
+ register: __ibmcloud_lb_pool_members_nwas_ers5
+ ibm.cloudcollection.ibm_is_lb_pool_member:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}"
+ pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-enqr'))[0].id }}"
+ target_address: "{{ (ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}"
+ port: "{{ ('39' + sap_system_nwas_abap_ers_instance_nr) | int }}"
+ weight: 100
+ loop: "{{ (groups['nwas_ers'] | default([]) ) }}"
+ loop_control:
+ loop_var: host_node
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+ failed_when: not __ibmcloud_lb_pool_members_nwas_ers5.rc == 0 and not 'already exists in a pool' in __ibmcloud_lb_pool_members_nwas_ers5.stderr
+
+# Secondary
+- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ERS - Enqueue Replication Server sapenqr process - Member 2 (Failover/Secondary)
+ register: __ibmcloud_lb_pool_members_nwas_ers6
+ ibm.cloudcollection.ibm_is_lb_pool_member:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}"
+ pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-enqr'))[0].id }}"
+ target_address: "{{ (ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}"
+ port: "{{ ('39' + sap_system_nwas_abap_ers_instance_nr) | int }}"
+ weight: 1
+ loop: "{{ (groups['nwas_ascs'] | default([]) ) }}"
+ loop_control:
+ loop_var: host_node
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+ failed_when: not __ibmcloud_lb_pool_members_nwas_ers6.rc == 0 and not 'already exists in a pool' in __ibmcloud_lb_pool_members_nwas_ers6.stderr
+
+
+# Primary
+- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ERS - SAP Start Service (SAPControl SOAP) HTTP sapctrl process - Member 1
+ register: __ibmcloud_lb_pool_members_nwas_ers7
+ ibm.cloudcollection.ibm_is_lb_pool_member:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}"
+ pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-sapctrl'))[0].id }}"
+ target_address: "{{ (ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}"
+ port: "{{ ('5' + sap_system_nwas_abap_ers_instance_nr + '13') | int }}"
+ weight: 100
+ loop: "{{ (groups['nwas_ers'] | default([]) ) }}"
+ loop_control:
+ loop_var: host_node
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+ failed_when: not __ibmcloud_lb_pool_members_nwas_ers7.rc == 0 and not 'already exists in a pool' in __ibmcloud_lb_pool_members_nwas_ers7.stderr
+
+# Secondary
+- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ERS - SAP Start Service (SAPControl SOAP) HTTP sapctrl process - Member 2 (Failover/Secondary)
+ register: __ibmcloud_lb_pool_members_nwas_ers8
+ ibm.cloudcollection.ibm_is_lb_pool_member:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}"
+ pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-sapctrl'))[0].id }}"
+ target_address: "{{ (ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}"
+ port: "{{ ('5' + sap_system_nwas_abap_ers_instance_nr + '13') | int }}"
+ weight: 1
+ loop: "{{ (groups['nwas_ascs'] | default([]) ) }}"
+ loop_control:
+ loop_var: host_node
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+ failed_when: not __ibmcloud_lb_pool_members_nwas_ers8.rc == 0 and not 'already exists in a pool' in __ibmcloud_lb_pool_members_nwas_ers8.stderr
+
+
+# Primary
+- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ERS - SAP Start Service (SAPControl SOAP) HTTPS (Secure) sapctrls - Member 1
+ register: __ibmcloud_lb_pool_members_nwas_ers9
+ ibm.cloudcollection.ibm_is_lb_pool_member:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}"
+ pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-sapctrls'))[0].id }}"
+ target_address: "{{ (ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}"
+ port: "{{ ('5' + sap_system_nwas_abap_ers_instance_nr + '14') | int }}"
+ weight: 100
+ loop: "{{ (groups['nwas_ers'] | default([]) ) }}"
+ loop_control:
+ loop_var: host_node
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+ failed_when: not __ibmcloud_lb_pool_members_nwas_ers9.rc == 0 and not 'already exists in a pool' in __ibmcloud_lb_pool_members_nwas_ers9.stderr
+
+# Secondary
+- name: Create IBM Cloud Load Balancer Back-end Pool Members for SAP NetWeaver ERS - SAP Start Service (SAPControl SOAP) HTTPS (Secure) sapctrls - Member 2 (Failover/Secondary)
+ register: __ibmcloud_lb_pool_members_nwas_ers10
+ ibm.cloudcollection.ibm_is_lb_pool_member:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}"
+ pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-sapctrls'))[0].id }}"
+ target_address: "{{ (ibmcloud_vs_all_info.resource.instances | selectattr('name', '==', host_node))[0].primary_network_interface[0].primary_ipv4_address }}"
+ port: "{{ ('5' + sap_system_nwas_abap_ers_instance_nr + '14') | int }}"
+ weight: 1
+ loop: "{{ (groups['nwas_ascs'] | default([]) ) }}"
+ loop_control:
+ loop_var: host_node
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+ failed_when: not __ibmcloud_lb_pool_members_nwas_ers10.rc == 0 and not 'already exists in a pool' in __ibmcloud_lb_pool_members_nwas_ers10.stderr
+
+
+# Create IBM Cloud Load Balancer Front-end Listeners (open port for Virtual IPs)
+
+- name: Create IBM Cloud Load Balancer Front-end Listener for SAP HANA - System DB SQL
+ register: __ibmcloud_lb_frontend_listener_hana1
+ ibm.cloudcollection.ibm_is_lb_listener:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}"
+ default_pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-hana-pool-sysdb-sql'))[0].id }}"
+ protocol: tcp
+ port: "{{ ('3' + sap_system_hana_db_instance_nr + '13') | int }}"
+ when: (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0))
+ failed_when: not __ibmcloud_lb_frontend_listener_hana1.rc == 0 and not 'listener_duplicate_port' in __ibmcloud_lb_frontend_listener_hana1.stderr
+
+- name: Create IBM Cloud Load Balancer Front-end Listener for SAP HANA - MDC Tenant 1 SQL
+ register: __ibmcloud_lb_frontend_listener_hana2
+ ibm.cloudcollection.ibm_is_lb_listener:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}"
+ default_pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-hana-pool-mdc1-sql'))[0].id }}"
+ protocol: tcp
+ port: "{{ ('3' + sap_system_hana_db_instance_nr + '15') | int }}"
+ when: (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0))
+ failed_when: not __ibmcloud_lb_frontend_listener_hana2.rc == 0 and not 'listener_duplicate_port' in __ibmcloud_lb_frontend_listener_hana2.stderr
+
+- name: Create IBM Cloud Load Balancer Front-end Listener for SAP HANA - startsrv HTTP
+ register: __ibmcloud_lb_frontend_listener_hana3
+ ibm.cloudcollection.ibm_is_lb_listener:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}"
+ default_pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-hana-pool-startsrv-http'))[0].id }}"
+ protocol: tcp
+ port: "{{ ('5' + sap_system_hana_db_instance_nr + '13') | int }}"
+ when: (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0))
+ failed_when: not __ibmcloud_lb_frontend_listener_hana3.rc == 0 and not 'listener_duplicate_port' in __ibmcloud_lb_frontend_listener_hana3.stderr
+
+- name: Create IBM Cloud Load Balancer Front-end Listener for SAP HANA - startsrv HTTPS
+ register: __ibmcloud_lb_frontend_listener_hana4
+ ibm.cloudcollection.ibm_is_lb_listener:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}"
+ default_pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-hana-pool-startsrv-https'))[0].id }}"
+ protocol: tcp
+ port: "{{ ('5' + sap_system_hana_db_instance_nr + '14') | int }}"
+ when: (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0))
+ failed_when: not __ibmcloud_lb_frontend_listener_hana4.rc == 0 and not 'listener_duplicate_port' in __ibmcloud_lb_frontend_listener_hana4.stderr
+
+- name: Create IBM Cloud Load Balancer Front-end Listener for SAP AnyDB - IBM Db2 Communication Port
+ register: __ibmcloud_lb_frontend_listener_anydb1
+ ibm.cloudcollection.ibm_is_lb_listener:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-anydb'))[0].id }}"
+ default_pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-anydb-pool-ibmdb2'))[0].id }}"
+ protocol: tcp
+ port: 5912
+ when: (groups['anydb_secondary'] is defined and (groups['anydb_secondary'] | length>0))
+ failed_when: not __ibmcloud_lb_frontend_listener_anydb1.rc == 0 and not 'listener_duplicate_port' in __ibmcloud_lb_frontend_listener_anydb1.stderr
+
+- name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ASCS - Dispatcher sapdp process
+ register: __ibmcloud_lb_frontend_listener_ascs1
+ ibm.cloudcollection.ibm_is_lb_listener:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}"
+ default_pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-dp'))[0].id }}"
+ protocol: tcp
+ port: "{{ ('32' + sap_system_nwas_abap_ascs_instance_nr) | int }}"
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+ failed_when: not __ibmcloud_lb_frontend_listener_ascs1.rc == 0 and not 'listener_duplicate_port' in __ibmcloud_lb_frontend_listener_ascs1.stderr
+
+- name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ASCS - Message Server sapms process
+ register: __ibmcloud_lb_frontend_listener_ascs2
+ ibm.cloudcollection.ibm_is_lb_listener:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}"
+ default_pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-ms'))[0].id }}"
+ protocol: tcp
+ port: "{{ ('36' + sap_system_nwas_abap_ascs_instance_nr) | int }}"
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+ failed_when: not __ibmcloud_lb_frontend_listener_ascs2.rc == 0 and not 'listener_duplicate_port' in __ibmcloud_lb_frontend_listener_ascs2.stderr
+
+- name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ASCS - Enqueue Server sapenq process
+ register: __ibmcloud_lb_frontend_listener_ascs3
+ ibm.cloudcollection.ibm_is_lb_listener:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}"
+ default_pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-enq'))[0].id }}"
+ protocol: tcp
+ port: "{{ ('39' + sap_system_nwas_abap_ascs_instance_nr) | int }}"
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+ failed_when: not __ibmcloud_lb_frontend_listener_ascs3.rc == 0 and not 'listener_duplicate_port' in __ibmcloud_lb_frontend_listener_ascs3.stderr
+
+- name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTP sapctrl process
+ register: __ibmcloud_lb_frontend_listener_ascs4
+ ibm.cloudcollection.ibm_is_lb_listener:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}"
+ default_pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-sapctrl'))[0].id }}"
+ protocol: tcp
+ port: "{{ ('5' + sap_system_nwas_abap_ascs_instance_nr + '13') | int }}"
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+ failed_when: not __ibmcloud_lb_frontend_listener_ascs4.rc == 0 and not 'listener_duplicate_port' in __ibmcloud_lb_frontend_listener_ascs4.stderr
+
+- name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTPS (Secure) sapctrls
+ register: __ibmcloud_lb_frontend_listener_ascs5
+ ibm.cloudcollection.ibm_is_lb_listener:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}"
+ default_pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-sapctrls'))[0].id }}"
+ protocol: tcp
+ port: "{{ ('5' + sap_system_nwas_abap_ascs_instance_nr + '14') | int }}"
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+ failed_when: not __ibmcloud_lb_frontend_listener_ascs5.rc == 0 and not 'listener_duplicate_port' in __ibmcloud_lb_frontend_listener_ascs5.stderr
+
+# - name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ERS - Dispatcher sapdp process
+# register: __ibmcloud_lb_frontend_listener_ers1
+# ibm.cloudcollection.ibm_is_lb_listener:
+# lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}"
+# default_pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-dp'))[0].id }}"
+# protocol: tcp
+# port: "{{ ('32' + sap_system_nwas_abap_ers_instance_nr) | int }}"
+# when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+# failed_when: not __ibmcloud_lb_frontend_listener_ers1.rc == 0 and not 'listener_duplicate_port' in __ibmcloud_lb_frontend_listener_ers1.stderr
+
+# - name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ERS - Message Server sapms process
+# register: __ibmcloud_lb_frontend_listener_ers2
+# ibm.cloudcollection.ibm_is_lb_listener:
+# lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}"
+# default_pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-ms'))[0].id }}"
+# protocol: tcp
+# port: "{{ ('36' + sap_system_nwas_abap_ers_instance_nr) | int }}"
+# when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+# failed_when: not __ibmcloud_lb_frontend_listener_ers2.rc == 0 and not 'listener_duplicate_port' in __ibmcloud_lb_frontend_listener_ers2.stderr
+
+- name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ERS - Enqueue Replication Server sapenqr process
+ register: __ibmcloud_lb_frontend_listener_ers3
+ ibm.cloudcollection.ibm_is_lb_listener:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}"
+ default_pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-enqr'))[0].id }}"
+ protocol: tcp
+ port: "{{ ('39' + sap_system_nwas_abap_ers_instance_nr) | int }}"
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+ failed_when: not __ibmcloud_lb_frontend_listener_ers3.rc == 0 and not 'listener_duplicate_port' in __ibmcloud_lb_frontend_listener_ers3.stderr
+
+- name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ERS - SAP Start Service (SAPControl SOAP) HTTP sapctrl process
+ register: __ibmcloud_lb_frontend_listener_ers4
+ ibm.cloudcollection.ibm_is_lb_listener:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}"
+ default_pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-sapctrl'))[0].id }}"
+ protocol: tcp
+ port: "{{ ('5' + sap_system_nwas_abap_ers_instance_nr + '13') | int }}"
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+ failed_when: not __ibmcloud_lb_frontend_listener_ers4.rc == 0 and not 'listener_duplicate_port' in __ibmcloud_lb_frontend_listener_ers4.stderr
+
+- name: Create IBM Cloud Load Balancer Front-end Listener for SAP NetWeaver ERS - SAP Start Service (SAPControl SOAP) HTTPS (Secure) sapctrls
+ register: __ibmcloud_lb_frontend_listener_ers5
+ ibm.cloudcollection.ibm_is_lb_listener:
+ lb: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}"
+ default_pool: "{{ (__ibmcloud_lb_pools.results | json_query('[*].resource.pools') | flatten | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-sapctrls'))[0].id }}"
+ protocol: tcp
+ port: "{{ ('5' + sap_system_nwas_abap_ers_instance_nr + '14') | int }}"
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+ failed_when: not __ibmcloud_lb_frontend_listener_ers5.rc == 0 and not 'listener_duplicate_port' in __ibmcloud_lb_frontend_listener_ers5.stderr
+
+
+# Set DNS A Record for each Virtual IP (use the first of the Private IPs assigned to the IBM Cloud Load Balancer instance within the VPC Subnet Range)
+
+- name: IBM Cloud Private DNS Record for SAP HANA HA Virtual Hostname
+ register: register_ibmcloud_pdns_record_ha_hana
+ ibm.cloudcollection.ibm_dns_resource_record:
+ instance_id: "{{ register_ibmcloud_pdns_service_instance.resource.guid }}"
+ zone_id: "{{ (register_ibmcloud_pdns.resource.dns_zones | selectattr('name', '==', sap_vm_provision_dns_root_domain) | first).zone_id }}"
+ name: "{{ sap_swpm_db_host }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}" # Host FQDN
+ rdata: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].private_ips[0].address }}" # IP Address
+ type: A
+ ttl: 7200
+  failed_when: not register_ibmcloud_pdns_record_ha_hana.rc == 0 and not 'The record already exists' in register_ibmcloud_pdns_record_ha_hana.stderr
+ loop: "{{ (groups['hana_primary'] | default([])) }}"
+ loop_control:
+ loop_var: host_node
+ when:
+ - groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)
+
+- name: IBM Cloud Private DNS Record for SAP AnyDB HA Virtual Hostname
+ register: register_ibmcloud_pdns_record_ha_anydb
+ ibm.cloudcollection.ibm_dns_resource_record:
+ instance_id: "{{ register_ibmcloud_pdns_service_instance.resource.guid }}"
+ zone_id: "{{ (register_ibmcloud_pdns.resource.dns_zones | selectattr('name', '==', sap_vm_provision_dns_root_domain) | first).zone_id }}"
+ name: "{{ sap_swpm_db_host }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}" # Host FQDN
+ rdata: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-anydb'))[0].private_ips[0].address }}" # IP Address
+ type: A
+ ttl: 7200
+  failed_when: not register_ibmcloud_pdns_record_ha_anydb.rc == 0 and not 'The record already exists' in register_ibmcloud_pdns_record_ha_anydb.stderr
+ loop: "{{ (groups['anydb_primary'] | default([])) }}"
+ loop_control:
+ loop_var: host_node
+ when:
+ - groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0)
+
+- name: IBM Cloud Private DNS Record for SAP NetWeaver ASCS HA Virtual Hostname
+ register: register_ibmcloud_pdns_record_ha_nwas_ascs
+ ibm.cloudcollection.ibm_dns_resource_record:
+ instance_id: "{{ register_ibmcloud_pdns_service_instance.resource.guid }}"
+ zone_id: "{{ (register_ibmcloud_pdns.resource.dns_zones | selectattr('name', '==', sap_vm_provision_dns_root_domain) | first).zone_id }}"
+ name: "{{ sap_swpm_ascs_instance_hostname }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}" # Host FQDN
+ rdata: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].private_ips[0].address }}" # IP Address
+ type: A
+ ttl: 7200
+  failed_when: not register_ibmcloud_pdns_record_ha_nwas_ascs.rc == 0 and not 'The record already exists' in register_ibmcloud_pdns_record_ha_nwas_ascs.stderr
+ loop: "{{ (groups['nwas_ascs'] | default([])) }}"
+ loop_control:
+ loop_var: host_node
+ when:
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+
+- name: IBM Cloud Private DNS Record for SAP NetWeaver ERS HA Virtual Hostname
+ register: register_ibmcloud_pdns_record_ha_nwas_ers
+ ibm.cloudcollection.ibm_dns_resource_record:
+ instance_id: "{{ register_ibmcloud_pdns_service_instance.resource.guid }}"
+ zone_id: "{{ (register_ibmcloud_pdns.resource.dns_zones | selectattr('name', '==', sap_vm_provision_dns_root_domain) | first).zone_id }}"
+ name: "{{ sap_swpm_ers_instance_hostname }}.{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}" # Host FQDN
+ rdata: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].private_ips[0].address }}" # IP Address
+ type: A
+ ttl: 7200
+  failed_when: not register_ibmcloud_pdns_record_ha_nwas_ers.rc == 0 and not 'The record already exists' in register_ibmcloud_pdns_record_ha_nwas_ers.stderr
+ loop: "{{ (groups['nwas_ers'] | default([])) }}"
+ loop_control:
+ loop_var: host_node
+ when:
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+
+
+- name: Set facts for all hosts - use facts from IBM Cloud Load Balancer - SAP HANA
+ ansible.builtin.set_fact:
+ sap_ha_pacemaker_cluster_vip_hana_primary_ip_address: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].private_ips[0].address }}"
+ when:
+ - sap_ha_pacemaker_cluster_ibmcloud_region is defined
+ - groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)
+
+
+- name: Set facts for all hosts - use facts from IBM Cloud Load Balancer - SAP AnyDB
+ ansible.builtin.set_fact:
+ sap_vm_temp_vip_anydb_primary: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-anydb'))[0].private_ips[0].address }}"
+ when:
+ - sap_ha_pacemaker_cluster_ibmcloud_region is defined
+ - groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0)
+
+
+- name: Set facts for all hosts - use facts from IBM Cloud Load Balancer - SAP NetWeaver
+ ansible.builtin.set_fact:
+ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].private_ips[0].address }}"
+ sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].private_ips[0].address }}"
+ when:
+ - sap_ha_pacemaker_cluster_ibmcloud_region is defined
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/post_deployment_execute.yml b/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/post_deployment_execute.yml
new file mode 100644
index 0000000..93afaa0
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/ibmcloud_vs/post_deployment_execute.yml
@@ -0,0 +1,311 @@
+---
+
+# The IBM Cloud Load Balancer is provisioned before Linux Pacemaker, and requires a temporary Health Check Probe port with an active OS service listening.
+# During initial installation, the Health Check probe uses Port 55550/55551/55552 with netcat listening; after the SAP software and Linux Pacemaker installations, it is switched to the correct port number.
+#
+# This is because an IBM Cloud Load Balancer Backend Pool Health Check probe is executed against all Backend Pool Members (Server instances);
+# if a host is not listening on the port, the probe fails and the resource is marked as unhealthy,
+# traffic is not routed to the host, and if all hosts fail then the IBM Cloud Load Balancer Frontend IP also fails, causing SAP software failures.
+#
+# NOTE - weighted round-robin is used to mitigate any potential risk of misconfiguration (both primary and secondary nodes showing as active), by applying the logic 'for every 100 connections, send 1 connection to the secondary' when both hosts are active.
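+#
+# For illustration only - a hedged sketch (commented out) of the kind of temporary netcat
+# listener described above; the task name, port choice and nohup invocation here are
+# assumptions for this example and are not part of this role:
+# - name: Example - start temporary listener for Load Balancer Health Check probe
+#   ansible.builtin.shell: nohup nc -l -k 55550 </dev/null >/dev/null 2>&1 &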
+
+- name: Ansible Task block for amending Load Balancer ports for High Availability - after provisioning IBM Cloud VS instances
+ delegate_to: localhost
+ run_once: true
+ environment:
+ IC_API_KEY: "{{ sap_vm_provision_ibmcloud_api_key }}" # For legacy Ansible Collection
+ IBMCLOUD_API_KEY: "{{ sap_vm_provision_ibmcloud_api_key }}" # For IBM Cloud CLI quiet login
+ IC_REGION: "{{ sap_vm_provision_ibmcloud_region }}"
+ when:
+ - sap_ha_pacemaker_cluster_ibmcloud_region is defined
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+ block:
+
+ - name: Inherit variable - set fact for IBM Cloud Load Balancer Pool Health Check - SAP HANA
+ ansible.builtin.set_fact:
+ ibmcloud_lb_pool_healthcheck_hana: "{{ sap_ha_pacemaker_cluster_healthcheck_hana_primary_port | default('') }}"
+ when: sap_ha_pacemaker_cluster_healthcheck_hana_primary_port is defined
+
+ - name: Inherit variable - set fact for IBM Cloud Load Balancer Pool Health Check - SAP NWAS ASCS
+ ansible.builtin.set_fact:
+ ibmcloud_lb_pool_healthcheck_nwas_ascs: "{{ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ascs_port | default('') }}"
+ when: sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ascs_port is defined
+
+ - name: Inherit variable - set fact for IBM Cloud Load Balancer Pool Health Check - SAP NWAS ERS
+ ansible.builtin.set_fact:
+ ibmcloud_lb_pool_healthcheck_nwas_ers: "{{ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ers_port | default('') }}"
+ when: sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ers_port is defined
+
+ - name: Default variable - Set fact for IBM Cloud Load Balancer Pool Health Check - SAP HANA
+ ansible.builtin.set_fact:
+ ibmcloud_lb_pool_healthcheck_hana: "{{ ('620' + (sap_system_hana_db_instance_nr | default('')) | string) | int }}"
+ when: not sap_ha_pacemaker_cluster_healthcheck_hana_primary_port is defined
+
+ - name: Default variable - Set fact for IBM Cloud Load Balancer Pool Health Check - SAP NWAS ASCS
+ ansible.builtin.set_fact:
+ ibmcloud_lb_pool_healthcheck_nwas_ascs: "{{ ('620' + (sap_system_nwas_abap_ascs_instance_nr | default('')) | string) | int }}"
+ when: not sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ascs_port is defined
+
+ - name: Default variable - Set fact for IBM Cloud Load Balancer Pool Health Check - SAP NWAS ERS
+ ansible.builtin.set_fact:
+ ibmcloud_lb_pool_healthcheck_nwas_ers: "{{ ('620' + (sap_system_nwas_abap_ers_instance_nr | default('')) | string) | int }}"
+ when: not sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ers_port is defined
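+
+  # Worked example for the defaults above: with an instance number of '00', the fallback
+  # Health Check port resolves to ('620' + '00') | int = 62000; with '10', to 62010.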
+
+
+  # Workaround for a bug which populates region and ibmcloud_api_key as Terraform arguments for the ibm_is_lbs_info Ansible Module in the legacy Ansible Collection
+ - name: IBM Cloud CLI execution to list Load Balancer/s info
+ register: ibmcloud_lbs_all_info_shell
+ ansible.builtin.shell: |
+ ibmcloud config --quiet --check-version false && ibmcloud login -g {{ sap_vm_provision_ibmcloud_resource_group_name }} -r {{ sap_vm_provision_ibmcloud_region }} --quiet
+ #ibmcloud plugin install infrastructure-service -f --quiet
+ ibmcloud is load-balancers --quiet --output json
+
+ - name: Set fact for IBM Cloud Load Balancer/s info
+ ansible.builtin.set_fact:
+ ibmcloud_lbs_all_info: "{{ ibmcloud_lbs_all_info_shell.stdout | split('Space:') | last | trim | from_json }}"
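+  # Parsing note: the shell output contains the 'ibmcloud login' banner followed by the JSON
+  # document; taking everything after the last 'Space:' marker (expected to be the final
+  # banner field) drops the banner so only the JSON is trimmed and parsed.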
+
+ - name: Set fact for IBM Cloud Load Balancer Back-end Pools to target
+ ansible.builtin.set_fact:
+ ibmcloud_lbs_target_pools: []
+
+ - name: Update fact for IBM Cloud Load Balancer Back-end Pools to target
+ ansible.builtin.set_fact:
+ ibmcloud_lbs_target_pools: "{{ ibmcloud_lbs_target_pools + [{ item.id : [( item.pools | json_query('[*].id') )] }] }}"
+ loop: "{{ ibmcloud_lbs_all_info }}"
+ loop_control:
+ label: "{{ item.name }}"
+ when: "('lb-sap-ha-hana' in item.name) or ('lb-sap-ha-anydb' in item.name) or ('lb-sap-ha-nwas-ascs' in item.name) or ('lb-sap-ha-nwas-ers' in item.name)"
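+  # Illustrative resulting structure (hypothetical IDs):
+  #   ibmcloud_lbs_target_pools: [ { 'lb_id_1': [['pool_id_a', 'pool_id_b']] }, ... ]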
+
+ # - name: Identify IBM Cloud Load Balancer Back-end Pools
+ # register: __ibmcloud_lb_pools
+ # ibm.cloudcollection.ibm_is_lb_pools_info:
+ # lb: "{{ item.id }}"
+ # loop: "{{ ibmcloud_lbs_all_info }}"
+ # loop_control:
+ # label: "{{ item.name }}"
+ # when: "('lb-sap-ha-hana' in item.name) or ('lb-sap-ha-anydb' in item.name) or ('lb-sap-ha-nwas-ascs' in item.name) or ('lb-sap-ha-nwas-ers' in item.name)"
+
+
+ - name: Update IBM Cloud Load Balancer Back-end Pool Health Check Port to Linux Pacemaker controlled listening port for SAP HANA - System DB SQL
+ register: __ibmcloud_lb_pool_hana1
+ ibm.cloudcollection.ibm_is_lb_pool:
+ id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', 'lb-sap-ha-hana-pool-sysdb-sql') | first).id }}"
+ name: lb-sap-ha-hana-pool-sysdb-sql
+ # lb: # Do not use, will force create new resource
+ algorithm: weighted_round_robin
+ protocol: tcp
+ health_delay: 20
+ health_retries: 2
+ health_timeout: 10
+ health_type: tcp
+ health_monitor_port: "{{ ibmcloud_lb_pool_healthcheck_hana }}"
+ when: (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0))
+
+ - name: Update IBM Cloud Load Balancer Back-end Pool Health Check Port to Linux Pacemaker controlled listening port for SAP HANA - MDC Tenant 1 SQL
+ register: __ibmcloud_lb_pool_hana2
+ ibm.cloudcollection.ibm_is_lb_pool:
+ id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', 'lb-sap-ha-hana-pool-mdc1-sql') | first).id }}"
+ name: lb-sap-ha-hana-pool-mdc1-sql
+ # lb: # Do not use, will force create new resource
+ algorithm: weighted_round_robin
+ protocol: tcp
+ health_delay: 20
+ health_retries: 2
+ health_timeout: 10
+ health_type: tcp
+ health_monitor_port: "{{ ibmcloud_lb_pool_healthcheck_hana }}"
+ when: (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0))
+
+ - name: Update IBM Cloud Load Balancer Back-end Pool Health Check Port to Linux Pacemaker controlled listening port for SAP HANA - startsrv HTTP
+ register: __ibmcloud_lb_pool_hana3
+ ibm.cloudcollection.ibm_is_lb_pool:
+ id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', 'lb-sap-ha-hana-pool-startsrv-http') | first).id }}"
+ name: lb-sap-ha-hana-pool-startsrv-http
+ # lb: # Do not use, will force create new resource
+ algorithm: weighted_round_robin
+ protocol: tcp
+ health_delay: 20
+ health_retries: 2
+ health_timeout: 10
+ health_type: tcp
+ health_monitor_port: "{{ ibmcloud_lb_pool_healthcheck_hana }}"
+ when: (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0))
+
+ - name: Update IBM Cloud Load Balancer Back-end Pool Health Check Port to Linux Pacemaker controlled listening port for SAP HANA - startsrv HTTPS
+ register: __ibmcloud_lb_pool_hana4
+ ibm.cloudcollection.ibm_is_lb_pool:
+ id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-hana'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', 'lb-sap-ha-hana-pool-startsrv-https') | first).id }}"
+ name: lb-sap-ha-hana-pool-startsrv-https
+ # lb: # Do not use, will force create new resource
+ algorithm: weighted_round_robin
+ protocol: tcp
+ health_delay: 20
+ health_retries: 2
+ health_timeout: 10
+ health_type: tcp
+ health_monitor_port: "{{ ibmcloud_lb_pool_healthcheck_hana }}"
+ when: (groups['hana_secondary'] is defined and (groups['hana_secondary'] | length>0))
+
+ - name: Update IBM Cloud Load Balancer Back-end Pool Health Check Port to Linux Pacemaker controlled listening port for SAP AnyDB - IBM Db2 Communication Port
+ ibm.cloudcollection.ibm_is_lb_pool:
+ id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-anydb'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', 'lb-sap-ha-anydb-pool-ibmdb2') | first).id }}"
+ name: lb-sap-ha-anydb-pool-ibmdb2
+ # lb: # Do not use, will force create new resource
+ algorithm: weighted_round_robin
+ protocol: tcp
+ health_delay: 20
+ health_retries: 2
+ health_timeout: 10
+ health_type: tcp
+ health_monitor_port: 62700
+ when: (groups['anydb_secondary'] is defined and (groups['anydb_secondary'] | length>0))
+
+ - name: Update IBM Cloud Load Balancer Back-end Pool Health Check Port to Linux Pacemaker controlled listening port for SAP NetWeaver ASCS - Dispatcher sapdp process
+ register: __ibmcloud_lb_pool_nwas_ascs1
+ ibm.cloudcollection.ibm_is_lb_pool:
+ id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-dp') | first).id }}"
+ name: lb-sap-ha-nwas-ascs-pool-dp
+ # lb: # Do not use, will force create new resource
+ algorithm: weighted_round_robin
+ protocol: tcp
+ health_delay: 20
+ health_retries: 2
+ health_timeout: 10
+ health_type: tcp
+ health_monitor_port: "{{ ibmcloud_lb_pool_healthcheck_nwas_ascs }}"
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+
+ - name: Update IBM Cloud Load Balancer Back-end Pool Health Check Port to Linux Pacemaker controlled listening port for SAP NetWeaver ASCS - Message Server sapms process
+ register: __ibmcloud_lb_pool_nwas_ascs2
+ ibm.cloudcollection.ibm_is_lb_pool:
+ id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-ms') | first).id }}"
+ name: lb-sap-ha-nwas-ascs-pool-ms
+ # lb: # Do not use, will force create new resource
+ algorithm: weighted_round_robin
+ protocol: tcp
+ health_delay: 20
+ health_retries: 2
+ health_timeout: 10
+ health_type: tcp
+ health_monitor_port: "{{ ibmcloud_lb_pool_healthcheck_nwas_ascs }}"
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+
+ - name: Update IBM Cloud Load Balancer Back-end Pool Health Check Port to Linux Pacemaker controlled listening port for SAP NetWeaver ASCS - Enqueue Server sapenq process
+ register: __ibmcloud_lb_pool_nwas_ascs3
+ ibm.cloudcollection.ibm_is_lb_pool:
+ id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-enq') | first).id }}"
+ name: lb-sap-ha-nwas-ascs-pool-enq
+ # lb: # Do not use, will force create new resource
+ algorithm: weighted_round_robin
+ protocol: tcp
+ health_delay: 20
+ health_retries: 2
+ health_timeout: 10
+ health_type: tcp
+ health_monitor_port: "{{ ibmcloud_lb_pool_healthcheck_nwas_ascs }}"
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+
+ - name: Update IBM Cloud Load Balancer Back-end Pool Health Check Port to Linux Pacemaker controlled listening port for SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTP sapctrl process
+ register: __ibmcloud_lb_pool_nwas_ascs4
+ ibm.cloudcollection.ibm_is_lb_pool:
+ id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-sapctrl') | first).id }}"
+ name: lb-sap-ha-nwas-ascs-pool-sapctrl
+ # lb: # Do not use, will force create new resource
+ algorithm: weighted_round_robin
+ protocol: tcp
+ health_delay: 20
+ health_retries: 2
+ health_timeout: 10
+ health_type: tcp
+ health_monitor_port: "{{ ibmcloud_lb_pool_healthcheck_nwas_ascs }}"
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+
+ - name: Update IBM Cloud Load Balancer Back-end Pool Health Check Port to Linux Pacemaker controlled listening port for SAP NetWeaver ASCS - SAP Start Service (SAPControl SOAP) HTTPS (Secure) sapctrls
+ register: __ibmcloud_lb_pool_nwas_ascs5
+ ibm.cloudcollection.ibm_is_lb_pool:
+ id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ascs'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', 'lb-sap-ha-nwas-ascs-pool-sapctrls') | first).id }}"
+ name: lb-sap-ha-nwas-ascs-pool-sapctrls
+ # lb: # Do not use, will force create new resource
+ algorithm: weighted_round_robin
+ protocol: tcp
+ health_delay: 20
+ health_retries: 2
+ health_timeout: 10
+ health_type: tcp
+ health_monitor_port: "{{ ibmcloud_lb_pool_healthcheck_nwas_ascs }}"
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+
+ # - name: Update IBM Cloud Load Balancer Back-end Pool Health Check Port to Linux Pacemaker controlled listening port for SAP NetWeaver ERS - Dispatcher sapdp process
+ # register: __ibmcloud_lb_pool_nwas_ers1
+ # ibm.cloudcollection.ibm_is_lb_pool:
+ # id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-dp') | first).id }}"
+ # name: lb-sap-ha-nwas-ers-pool-dp
+ # # lb: # Do not use, will force create new resource
+ # algorithm: weighted_round_robin
+ # protocol: tcp
+ # health_delay: 20
+ # health_retries: 2
+ # health_timeout: 10
+ # health_type: tcp
+ # health_monitor_port: "{{ ibmcloud_lb_pool_healthcheck_nwas_ers }}"
+ # when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+
+ # - name: Update IBM Cloud Load Balancer Back-end Pool Health Check Port to Linux Pacemaker controlled listening port for SAP NetWeaver ERS - Message Server sapms process
+ # register: __ibmcloud_lb_pool_nwas_ers2
+ # ibm.cloudcollection.ibm_is_lb_pool:
+ # id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-ms') | first).id }}"
+ # name: lb-sap-ha-nwas-ers-pool-ms
+ # # lb: # Do not use, will force create new resource
+ # algorithm: weighted_round_robin
+ # protocol: tcp
+ # health_delay: 20
+ # health_retries: 2
+ # health_timeout: 10
+ # health_type: tcp
+ # health_monitor_port: "{{ ibmcloud_lb_pool_healthcheck_nwas_ers }}"
+ # when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+
+ - name: Update IBM Cloud Load Balancer Back-end Pool Health Check Port to Linux Pacemaker controlled listening port for SAP NetWeaver ERS - Enqueue Replication Server sapenqr process
+ register: __ibmcloud_lb_pool_nwas_ers3
+ ibm.cloudcollection.ibm_is_lb_pool:
+ id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-enqr') | first).id }}"
+ name: lb-sap-ha-nwas-ers-pool-enqr
+ # lb: # Do not use, will force create new resource
+ algorithm: weighted_round_robin
+ protocol: tcp
+ health_delay: 20
+ health_retries: 2
+ health_timeout: 10
+ health_type: tcp
+ health_monitor_port: "{{ ibmcloud_lb_pool_healthcheck_nwas_ers }}"
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+
+ - name: Update IBM Cloud Load Balancer Back-end Pool Health Check Port to Linux Pacemaker controlled listening port for SAP NetWeaver ERS - SAP Start Service (SAPControl SOAP) HTTP sapctrl process
+ register: __ibmcloud_lb_pool_nwas_ers4
+ ibm.cloudcollection.ibm_is_lb_pool:
+ id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-sapctrl') | first).id }}"
+ name: lb-sap-ha-nwas-ers-pool-sapctrl
+ # lb: # Do not use, will force create new resource
+ algorithm: weighted_round_robin
+ protocol: tcp
+ health_delay: 20
+ health_retries: 2
+ health_timeout: 10
+ health_type: tcp
+ health_monitor_port: "{{ ibmcloud_lb_pool_healthcheck_nwas_ers }}"
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
+
+ - name: Update IBM Cloud Load Balancer Back-end Pool Health Check Port to Linux Pacemaker controlled listening port for SAP NetWeaver ERS - SAP Start Service (SAPControl SOAP) HTTPS (Secure) sapctrls
+ register: __ibmcloud_lb_pool_nwas_ers5
+ ibm.cloudcollection.ibm_is_lb_pool:
+ id: "{{ (ibmcloud_lbs_all_info | selectattr('name', '==', 'lb-sap-ha-nwas-ers'))[0].id }}/{{ ((ibmcloud_lbs_all_info | json_query('[*].pools') | flatten) | selectattr('name', '==', 'lb-sap-ha-nwas-ers-pool-sapctrls') | first).id }}"
+ name: lb-sap-ha-nwas-ers-pool-sapctrls
+ # lb: # Do not use, will force create new resource
+ algorithm: weighted_round_robin
+ protocol: tcp
+ health_delay: 20
+ health_retries: 2
+ health_timeout: 10
+ health_type: tcp
+ health_monitor_port: "{{ ibmcloud_lb_pool_healthcheck_nwas_ers }}"
+ when: (groups['nwas_ers'] is defined and (groups['nwas_ers'] | length>0))
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/ibmpowervm_vm/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible/ibmpowervm_vm/execute_main.yml
new file mode 100644
index 0000000..f878904
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/ibmpowervm_vm/execute_main.yml
@@ -0,0 +1,127 @@
+---
+
+# Method 1 for auth: set a fact and reuse it, with validate_certs set on each Ansible Task
+- name: OpenStack Authentication - Method 1
+ ansible.builtin.set_fact:
+ openstack_auth:
+ auth_url: "{{ sap_vm_provision_ibmpowervm_vc_auth_endpoint }}"
+ username: "{{ sap_vm_provision_ibmpowervm_vc_user }}"
+ password: "{{ sap_vm_provision_ibmpowervm_vc_user_password }}"
+ project_name: "{{ sap_vm_provision_ibmpowervm_vc_project_name }}"
+ project_domain_name: "default" # If blank will cause error "Expecting to find domain in project"
+ user_domain_name: "default" # If blank will cause error "Expecting to find domain in user"
+
+# Method 2 for auth: obtain a token and subsequently set the environment on each task, e.g. OS_TOKEN: "{{ openstack_session.auth_token }}"
+# Use if a direct API call or CLI commands are required
+# - name: OpenStack Authentication - Method 2
+# openstack.cloud.auth:
+# auth:
+# auth_url: "{{ sap_vm_provision_ibmpowervm_vc_auth_endpoint }}"
+# username: "{{ sap_vm_provision_ibmpowervm_vc_user }}"
+# password: "{{ sap_vm_provision_ibmpowervm_vc_user_password }}"
+# project_name: "{{ sap_vm_provision_ibmpowervm_vc_project_name }}"
+# project_domain_name: "default" # If blank will cause error "Expecting to find domain in project"
+# user_domain_name: "default" # If blank will cause error "Expecting to find domain in user"
+# interface: internal # internal, public, admin
+# validate_certs: false # Allow Self-Signed Certificate
+# wait: true
+# when: openstack_auth is undefined or not openstack_auth
+# register: openstack_session
+
+- name: Set fact to hold loop variables from include_tasks
+ ansible.builtin.set_fact:
+ register_provisioned_host_all: []
+
+- name: Create IBM PowerVM SSH Key Pair
+ openstack.cloud.keypair:
+ auth: "{{ openstack_auth }}"
+ validate_certs: false # Allow Self-Signed Certificate
+ state: present
+ name: "{{ sap_vm_provision_ibmpowervm_key_pair_name_ssh_host_public_key }}"
+ public_key: "{{ lookup('ansible.builtin.file', sap_vm_provision_ssh_host_public_key_file_path ) }}"
+ throttle: 1
+
+- name: Provision hosts to IBM PowerVM
+ register: register_provisioned_hosts
+ ansible.builtin.include_tasks:
+ file: "{{ 'platform_' + sap_vm_provision_iac_type }}/{{ sap_vm_provision_iac_platform }}/execute_provision.yml"
+ vars:
+ openstack_auth_delegate: "{{ openstack_auth }}"
+
+- name: Add hosts provisioned to the Ansible Inventory
+ register: register_add_hosts
+ ansible.builtin.add_host:
+ name: "{{ add_item[0].host_node }}"
+ groups: "{{ add_item[0].sap_system_type + '_' if (add_item[0].sap_system_type != '') }}{{ add_item[0].sap_host_type }}"
+ ansible_host: "{{ add_item[0].access_ipv4 }}"
+ ansible_user: "root"
+ ansible_ssh_private_key_file: "{{ sap_vm_provision_ssh_host_private_key_file_path }}"
+ ansible_ssh_common_args: -o ConnectTimeout=180 -o ControlMaster=auto -o ControlPersist=3600s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no
+ loop: "{{ ansible_play_hosts | map('extract', hostvars, 'register_provisioned_host_all') }}"
+ loop_control:
+ label: "{{ add_item[0].host_node }}"
+ loop_var: add_item
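+
+# Illustrative example (assumed values): sap_system_type 'hana' with sap_host_type 'primary'
+# would place the provisioned host in the Ansible Inventory group 'hana_primary'.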
+
+
+# Cannot override any variables from extravars input, see https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#understanding-variable-precedence
+# Ensure no default value exists for any prompted variable before execution of Ansible Playbook
+
+- name: Set fact to hold all inventory hosts in all groups
+ ansible.builtin.set_fact:
+ groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] ] | flatten | select() }}"
+
+- name: Set Ansible Vars
+ register: register_set_ansible_vars
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars.yml
+
+ # - ansible.builtin.debug:
+ # var: register_add_hosts.results
+
+- name: Ansible Task block to execute on target inventory hosts
+ delegate_to: "{{ inventory_hostname }}"
+ block:
+
+ # Required to collect the remote host's facts for further processing
+ # in the following steps
+ - name: Gather host facts
+ ansible.builtin.setup:
+
+  # Must be set to the short hostname,
+  # so that the commands 'hostname' and 'hostname -s' both return only the short hostname;
+  # otherwise SAP SWPM may error when using name.domain.com.domain.com
+ - name: Change system hostname (must be set to short name and not FQDN, as required by SAP)
+ ansible.builtin.hostname:
+ name: "{{ inventory_hostname_short }}"
+
+ - name: Set /etc/hosts
+ register: register_etc_hosts_file
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts.yml
+
+ - name: Set /etc/hosts for HA
+ register: register_etc_hosts_file_ha
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_ha.yml
+ when:
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+
+ - name: Set /etc/hosts for Scale-Out
+ register: register_etc_hosts_file_scaleout
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_scaleout.yml
+ when:
+ - (groups["hana_primary"] is defined and (groups["hana_primary"] | length>0)) and (sap_hana_scaleout_active_coordinator is defined or sap_hana_scaleout_active_worker is defined or sap_hana_scaleout_standby is defined)
+
+ - name: Set vars for sap_storage_setup Ansible Role
+ register: register_ansible_vars_storage
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars_storage.yml
+
+ - name: Register Package Repositories
+ ansible.builtin.include_tasks:
+ file: common/register_os.yml
+
+ - name: Register Web Forward Proxy
+ ansible.builtin.include_tasks:
+ file: common/register_proxy.yml
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/ibmpowervm_vm/execute_provision.yml b/roles/sap_vm_provision/tasks/platform_ansible/ibmpowervm_vm/execute_provision.yml
new file mode 100644
index 0000000..31ba15b
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/ibmpowervm_vm/execute_provision.yml
@@ -0,0 +1,357 @@
+---
+# The tasks in this file are executed in a loop over the defined hosts
+
+# NOTE: Prerequisite is to create IBM PowerVC Storage Templates (OpenStack Cinder Volume Type), which is not possible from the Ansible Collection for OpenStack
+# https://www.ibm.com/docs/en/powervc/1.4.3?topic=apis-supported-volume-type-extra-specs
+### Show IBM PowerVC Storage list
+### openstack --insecure volume service list --service cinder-volume
+### Show IBM PowerVC Storage Template list
+### openstack --insecure volume type list
+
+# When SAP HANA Scale-Out is used and the host name is not in the original specifications, strip the node number suffix from the host name
+- name: Set fact when performing SAP HANA Scale-Out
+ ansible.builtin.set_fact:
+ scaleout_origin_host_spec: "{{ inventory_hostname | regex_replace('^(.+?)\\d*$', '\\1') }}"
+ when:
+ - sap_hana_scaleout_active_coordinator is defined
+ - not inventory_hostname in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan].keys()
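+# Example (hypothetical hostname): inventory_hostname 'hana-node2' yields scaleout_origin_host_spec 'hana-node',
+# i.e. the trailing node number digits are stripped to match the original host specification key.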
+
+- name: Check if VM exists
+ register: register_check_vm_exists
+ openstack.cloud.server_info:
+ auth: "{{ openstack_auth_delegate }}"
+ validate_certs: false # Allow Self-Signed Certificate
+ name: "{{ inventory_hostname }}"
+
+- name: Check OS Image available in IBM PowerVM
+ register: register_ibmpowervm_available_os
+ openstack.cloud.image_info:
+ auth: "{{ openstack_auth_delegate }}"
+ validate_certs: false # Allow Self-Signed Certificate
+ image: "{{ sap_vm_provision_ibmpowervm_vm_host_os_image }}"
+
+- name: Check network available in IBM PowerVM
+ openstack.cloud.networks_info:
+ auth: "{{ openstack_auth }}"
+ validate_certs: false # Allow Self-Signed Certificate
+ name: "{{ sap_vm_provision_ibmpowervm_network_name }}"
+ register: register_ibmpowervm_available_network
+
+
+# VM creation block:
+# This block is run when the VM does not exist yet.
+#
+- name: Block that provisions the VM
+ when:
+ - register_check_vm_exists.servers is defined
+ - register_check_vm_exists.servers | length == 0
+ block:
+
+ # See documented IBM PowerVM Compute Template (OpenStack Flavor) extra specs - https://www.ibm.com/docs/en/powervc-cloud/latest?topic=apis-flavors-extra-specs
+ - name: Create IBM PowerVM Compute Template
+ register: register_ibmpowervm_compute_template
+ openstack.cloud.compute_flavor:
+ auth: "{{ openstack_auth }}"
+ validate_certs: false # Allow Self-Signed Certificate
+
+ state: present
+ name: "{{ inventory_hostname }}-compute-template"
+
+ ## Virtual Machine main resources definition
+ # Assume SMT-8, 1 IBM Power CPU Core therefore divide by 8 = CPU Threads
+ vcpus: "{{ (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_cpu_threads / lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_cpu_smt) | round(0, 'common') | int }}" # Virtual Processors (i.e. IBM Power CPU Cores), Desired. API must receive an integer
+ ram: "{{ (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_memory_gib * 1024) | int }}" # Memory (MiB), Desired
+ disk: 0 # Must be set to 0 otherwise conflicts with OS Image template
+ swap: 0 # Must be set to 0 otherwise error "failed with exception: Build of instance xxxx was re-scheduled: list index out of range"
+ is_public: true
+
+ # After creation, modifications to extra_specs parameters may not be identified
+ extra_specs:
+ #### Virtual Processors (i.e. IBM Power CPU Cores) - for Production systems must be minimum of 4 ####
+ "powervm:min_vcpu": "{{ [((lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_cpu_threads / lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_cpu_smt) * 0.75) | round(0, 'floor'), 1] | max | int }}" # Virtual Processors (i.e. IBM Power CPU Cores), Minimum. Value of 1 is lowest possible. API must receive an integer
+ "powervm:max_vcpu": "{{ ((lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_cpu_threads / lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_cpu_smt) * 1.20) | round(0, 'ceil') | int }}" # Virtual Processors (i.e. IBM Power CPU Cores), Maximum. API must receive an integer
+
+ #### Dynamic LPAR Entitled Capacity of Virtual Processor units (i.e. IBM Power CPU Cores guaranteed to be available) ####
+ # Processing units set minimum to 80% of the minimum Virtual Processors (i.e. IBM Power CPU Cores)
+ # Processing units set standard to 80% of the Virtual Processors (i.e. IBM Power CPU Cores)
+ # Processing units set maximum to 100% of the maximum Virtual Processors (i.e. IBM Power CPU Cores)
+ "powervm:min_proc_units": "{{ ((((lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_cpu_threads / lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_cpu_smt) * 0.75) | round(1, 'floor')) * 0.8) | round(2, 'floor') | float }}" # Processing units, Minimum
+ "powervm:proc_units": "{{ ((lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_cpu_threads / lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_cpu_smt) | round(0, 'common')) * 0.8 | round(2, 'common') | float }}" # Processing units, Desired
+ "powervm:max_proc_units": "{{ ((lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_cpu_threads / lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_cpu_smt) * 1.20) | round(0, 'ceil') | float }}" # Processing units, Maximum
+
+ "powervm:dedicated_proc": "false"
+ #"powervm:dedicated_sharing_mode": "share_idle_procs" # When 'dedicated_proc' true, share_idle_procs = "Allow processor sharing when the virtual machine is inactive"
+ "powervm:uncapped": "true"
+ "powervm:shared_weight": 128
+ "powervm:shared_proc_pool_name": "{{ sap_vm_provision_ibmpowervm_host_group_shared_procesor_pool_name }}"
+ "powervm:processor_compatibility": "default"
+
+ "powervm:min_mem": "{{ (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_memory_gib * 1024) - (0.25 * (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_memory_gib * 1024)) | round(0, 'ceil') | int }}" # Memory, Minimum. API must receive an integer
+ "powervm:max_mem": "{{ (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][inventory_hostname].ibmpowervm_vm_memory_gib * 1024) | round(0, 'ceil') | int }}" # Memory, Maximum. API must receive an integer
+
+ #"powervm:ame_expansion_factor": 0
+ "powervm:enforce_affinity_check": "true"
+ "powervm:enable_lpar_metric": "true"
+ "powervm:availability_priority": 127 # Default is 127, Higher Priority default is 191
+ #"powervm:ppt_ratio": "1:1024"
+ "powervm:secure_boot": 0 # "Disabled"
+ "powervm:srr_capability": "false" # Disable the Simplified Remote Restart
+
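+  # Worked example for the sizing above (illustrative values): with ibmpowervm_vm_cpu_threads = 32
+  # and ibmpowervm_vm_cpu_smt = 8, vcpus = 32/8 = 4, "powervm:min_vcpu" = max(floor(4*0.75), 1) = 3,
+  # "powervm:max_vcpu" = ceil(4*1.2) = 5, "powervm:proc_units" = 4*0.8 = 3.2, "powervm:max_proc_units" = 5.0.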
+
+ - name: Provision IBM PowerVM vNIC (Network Port)
+ register: register_provisioned_host_vnic_single
+ openstack.cloud.port:
+ auth: "{{ openstack_auth }}"
+ validate_certs: false # Allow Self-Signed Certificate
+ state: present
+ name: "{{ inventory_hostname }}-vnic0"
+ network: "{{ sap_vm_provision_ibmpowervm_network_name }}"
+ # fixed_ips:
+ # - ip_address: "{{ ansible_host }}"
+ vnic_type: "{{ sap_vm_provision_ibmpowervm_network_vnic_type }}" # vNIC Mode is either 'direct' for SR-IOV, or 'normal' for Shared Ethernet Adapter (SEA)
+ # capacity = vNIC using SR-IOV Minimum Capacity percentage of total SR-IOV Port Bandwidth (must be above '0.02' for 2%)
+ # vnic_required_vfs = vNIC using SR-IOV Port Redundancy level, '1' for Non-Redundant and '2' for Redundant
+ binding_profile: |-
+ {% set vnic_config = [] -%}
+ {%- if sap_vm_provision_ibmpowervm_network_vnic_type == 'normal' -%}
+ {% set map = vnic_config.extend([{
+ 'delete_with_instance': '1'
+ }]) -%}
+ {%- elif sap_vm_provision_ibmpowervm_network_vnic_type == 'direct' -%}
+ {% set map = vnic_config.extend([{
+ 'delete_with_instance': '1',
+ 'capacity': '0.02',
+ 'vnic_required_vfs': '2'
+ }]) -%}
+ {%- endif -%}
+ {{ vnic_config[0] }}
+
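+  # Illustrative render of the binding_profile template above:
+  #   vnic_type 'normal' -> {'delete_with_instance': '1'}
+  #   vnic_type 'direct' -> {'delete_with_instance': '1', 'capacity': '0.02', 'vnic_required_vfs': '2'}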
+ - name: Provision IBM PowerVM Virtual Machine (LPAR)
+ register: register_provisioned_host_single
+ openstack.cloud.server:
+ auth: "{{ openstack_auth }}"
+ validate_certs: false # Allow Self-Signed Certificate
+
+ ## Virtual Machine target Hypervisor definition
+ availability_zone: "{{ sap_vm_provision_ibmpowervm_host_group_name }}" # IBM PowerVM Hypervisor Cluster Host Group Name
+ #region_name: "RegionOne"
+
+ ## Virtual Machine definition
+ state: present
+ wait: true # wait until VM is running before Ansible Module is marked as completed
+ timeout: 1200 # seconds, wait 20 minutes for VM to provision
+ name: "{{ inventory_hostname }}"
+ description: "{{ inventory_hostname }} created by Ansible Playbook for SAP"
+
+ ## Virtual Machine main resources definition
+ flavor: "{{ register_ibmpowervm_compute_template.flavor.id }}"
+ image: "{{ sap_vm_provision_ibmpowervm_vm_host_os_image }}" # Do not set boot_from_volume, boot_volume or volumes parameters when cloning OS Image template
+ terminate_volume: true
+ key_name: "{{ sap_vm_provision_ibmpowervm_key_pair_name_ssh_host_public_key }}"
+
+ ## Virtual Machine Network configuration - vNICs
+ auto_ip: false
+ #network: "{{ sap_vm_provision_ibmpowervm_network_name }}"
+ nics:
+ - port-name: "{{ inventory_hostname }}-vnic0"
+
+ ## Post-provisioning: Virtual Machine post configuration
+ meta:
+ hostname: "{{ inventory_hostname }}"
+ #userdata: | # cloud-init userdata
+
+ # Report VM provisioning complete, only after status is Active (not Building)
+    # If a provisioning error occurs (e.g. 'Could not find image XYZ with exclude (deprecated)') then
+    # the failure is reported as 'The conditional check register_provisioned_host_single.server.status == "ACTIVE" failed'
+    # and the actual error stays hidden unless the 'until' is commented-out
+ until: register_provisioned_host_single.server.status is defined and register_provisioned_host_single.server.status == "ACTIVE"
+ retries: 120
+ delay: 5
+
+### End of boot disk and VM creation Ansible Task Block
+
+
+- name: Collect info on IBM PowerVM vNIC (Network Port)
+ register: register_provisioned_host_vnic_single
+ openstack.cloud.port_info:
+ auth: "{{ openstack_auth }}"
+ validate_certs: false # Allow Self-Signed Certificate
+ name: "{{ inventory_hostname }}-vnic0"
+
+
+- name: Create fact for delegate host IP
+ ansible.builtin.set_fact:
+ provisioned_private_ip: "{{ register_provisioned_host_vnic_single.ports[0].fixed_ips[0].ip_address }}"
+
+
+# The OpenStack APIs cannot query the IBM PowerVC APIs directly,
+# therefore the VM Health status (Warning or OK), which is based on the
+# connection status to RMC (Resource Monitoring and Control subsystem), cannot be checked.
+# This status is ignored here as it does not reflect the VM readiness to execute.
+
+- name: Wait for VM connection readiness
+ ansible.builtin.wait_for:
+ host: "{{ provisioned_private_ip }}"
+ port: 22
+ delay: 10
+ sleep: 10
+ timeout: 600
+
+- name: Collect only facts about hardware
+ register: host_disks_info
+ ansible.builtin.setup:
+ gather_subset:
+ - hardware
+ remote_user: root
+ become: true
+ become_user: root
+ delegate_to: "{{ provisioned_private_ip }}"
+ delegate_facts: false
+ vars:
+ ansible_ssh_private_key_file: "{{ sap_vm_provision_ssh_host_private_key_file_path }}"
+ ansible_ssh_common_args: -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no
+
+#- name: Output disks
+# ansible.builtin.debug:
+# var: hostvars[inventory_hostname].ansible_devices.keys() | list
+
+#- name: Debug Ansible Facts devices used list
+# ansible.builtin.debug:
+# msg: "{{ host_disks_info.ansible_facts.ansible_device_links.ids.keys() | list }}"
+
+
+- name: Set fact for available storage volume device names
+  ansible.builtin.set_fact:
+    available_volumes: |-
+      {% set letters = 'bcdefghijklmnopqrstuvwxyz' %}
+      {% set ansible_facts_devices_used_list = host_disks_info.ansible_facts.ansible_device_links.ids.keys() | list %}
+      {% set volumes = [] %}
+      {%- for letter in letters -%}
+        {% if 'sd' + letter not in ansible_facts_devices_used_list -%}
+          {% set dev = volumes.append('/dev/sd' + letter) %}
+        {%- endif %}
+      {%- endfor %}
+      {{ volumes | list | unique }}
+
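+# Illustrative result of the template above: if the host already has devices
+# 'sda' and 'sdb', available_volumes becomes ['/dev/sdc', '/dev/sdd', ..., '/dev/sdz'].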
+#- name: Debug available_volumes
+# ansible.builtin.debug:
+# msg: "{{ available_volumes }}"
+
+# Combines only the filesystem volume information from lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')
+# for volume device assignment.
+# This task assigns a device name to each volume to be created.
+- name: Set fact for target device map
+ ansible.builtin.set_fact:
+ filesystem_volume_map: |
+ {% set volume_map = [] -%}
+ {% set av_vol = available_volumes -%}
+ {% for storage_item in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].storage_definition -%}
+ {% for idx in range(0, storage_item.disk_count | default(1)) -%}
+ {% if (storage_item.filesystem_type is defined) -%}
+ {% if ('swap' in storage_item.filesystem_type and storage_item.swap_path is not defined)
+ or ('swap' not in storage_item.filesystem_type and storage_item.nfs_path is not defined) -%}
+ {% set vol = volume_map.extend([
+ {
+ 'definition_key': storage_item.name,
+ 'device': av_vol[0],
+ 'fstype': storage_item.filesystem_type | default('xfs'),
+ 'name': storage_item.name + idx|string,
+ 'size': storage_item.disk_size | default(0),
+ 'type': storage_item.disk_type | default('')
+ }
+ ]) %}
+ {%- set _ = av_vol.pop(0) -%}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- endfor %}
+ {{ volume_map }}
+
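+# Illustrative entry appended by the map above (hypothetical values):
+#   {'definition_key': 'hana_data', 'device': '/dev/sdc', 'fstype': 'xfs',
+#    'name': 'hana_data0', 'size': 384, 'type': 'tier1'}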
+#- name: Debug filesystem_volume_map
+# ansible.builtin.debug:
+# msg: "{{ filesystem_volume_map }}"
+
+
+# The volume creation task requires the above task to define the parameter
+# which contains the calculated unique device names.
+- name: Provision Virtual Disk volumes for IBM PowerVM VM filesystems
+ openstack.cloud.volume:
+ auth: "{{ openstack_auth_delegate }}"
+ validate_certs: false # Allow Self-Signed Certificate
+ state: present
+ name: "{{ inventory_hostname }}-vol_{{ vol_item.name }}"
+ # availability_zone: ""
+ # scheduler_hints:
+ # same_host: fs5200#IC-Cloud
+ # additional_properties:
+ # "drivers:multipath": "0"
+ size: "{{ vol_item.size }}"
+ volume_type: "{{ sap_vm_provision_ibmpowervm_storage_template_name }}"
+ is_multiattach: false
+ is_bootable: false
+ loop: "{{ filesystem_volume_map }}"
+ loop_control:
+ loop_var: vol_item
+ index_var: vol_item_index
+ label: "{{ vol_item.definition_key }}: {{ vol_item.name }} (size: {{ vol_item.size }})"
+ when:
+ - vol_item.fstype is defined
+ - vol_item.size > 0
+ register: volume_provisioning
+
+- name: Attach Virtual Disk volumes to the IBM PowerVM VM
+ openstack.cloud.server_volume:
+ auth: "{{ openstack_auth_delegate }}"
+ validate_certs: false # Allow Self-Signed Certificate
+ state: present
+ server: "{{ inventory_hostname }}"
+ volume: "{{ virtual_disk_item.volume.id }}"
+ loop: "{{ volume_provisioning.results }}"
+ loop_control:
+ loop_var: virtual_disk_item
+ index_var: virtual_disk_item_index
+ label: "{{ virtual_disk_item.volume.name }}"
+ retries: 2
+ delay: 5
+
+- name: Re-scan IBM PowerVM VM SCSI Bus
+ ansible.builtin.command: "/usr/bin/rescan-scsi-bus.sh"
+ register: rescan_scsi_bus_output
+  changed_when: rescan_scsi_bus_output.rc == 0
+ remote_user: root
+ become: true
+ become_user: root
+ delegate_to: "{{ provisioned_private_ip }}"
+ delegate_facts: false
+ vars:
+ ansible_ssh_private_key_file: "{{ sap_vm_provision_ssh_host_private_key_file_path }}"
+ ansible_ssh_common_args: -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no
+
+- name: Wait 10 seconds for the re-scan of IBM PowerVM VM SCSI Bus
+ ansible.builtin.pause:
+ seconds: 10
+ prompt: ""
+
+
+- name: Check VM status
+ register: register_provisioned_host_single_info
+ openstack.cloud.server_info:
+ auth: "{{ openstack_auth_delegate }}"
+ validate_certs: false # Allow Self-Signed Certificate
+ name: "{{ inventory_hostname }}"
+
+# Note: openstack.cloud.server_info Ansible Module can provide:
+# servers[0].metadata.original_host / servers[0].compute_host / servers[0].hypervisor_hostname for IBM PowerVM HMC System 'Machine Type Machine Serial' (MTMS)
+# servers[0].instance_name for IBM PowerVM HMC 'Partition Name' of the Virtual Machine
+# servers[0].vm_uuid for IBM PowerVM HMC 'Partition UUID' of the Virtual Machine
+# servers[0].id for IBM PowerVC 'ID' of the Virtual Machine
+
+- name: Append loop value to register
+ ansible.builtin.set_fact:
+ register_provisioned_host_single: "{{ register_provisioned_host_single_info.servers[0] | combine( { 'host_node' : inventory_hostname } , { 'sap_host_type' : lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_host_type } , { 'sap_system_type' : (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_system_type | default('')) } ) }}"
+
+- name: Append output to merged register
+ ansible.builtin.set_fact:
+ register_provisioned_host_all: "{{ register_provisioned_host_all + [register_provisioned_host_single] }}"
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/ibmpowervm_vm/post_deployment_execute.yml b/roles/sap_vm_provision/tasks/platform_ansible/ibmpowervm_vm/post_deployment_execute.yml
new file mode 100644
index 0000000..19c7341
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/ibmpowervm_vm/post_deployment_execute.yml
@@ -0,0 +1,5 @@
+---
+
+- name: Post Deployment notification
+ ansible.builtin.debug:
+ msg: "There are no Post Deployment tasks for SAP on this Infrastructure Platform"
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/kubevirt_vm/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible/kubevirt_vm/execute_main.yml
new file mode 100644
index 0000000..fa883bf
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/kubevirt_vm/execute_main.yml
@@ -0,0 +1,116 @@
+---
+
+- name: Set fact to hold loop variables from include_tasks
+ ansible.builtin.set_fact:
+ register_provisioned_host_all: []
+
+- name: Set fact for auth - defaults
+ ansible.builtin.set_fact:
+ api_version: "kubevirt.io/v1"
+    validate_certs: "{{ lookup('env', 'K8S_AUTH_VERIFY_SSL') | default(false, true) }}"
+    persist_config: "{{ lookup('env', 'K8S_AUTH_PERSIST_CONFIG') | default(true, true) }}"
+ host: "{{ sap_vm_provision_kubevirt_cluster_url | default(lookup('env', 'K8S_AUTH_HOST')) | default(omit) }}" # Target Hypervisor Node
+
+- name: Set fact for auth - Kubeconfig
+ ansible.builtin.set_fact:
+ kubeconfig: "{{ sap_vm_provision_kubevirt_kubeconfig_path | default(lookup('env', 'K8S_AUTH_KUBECONFIG')) | default(lookup('env', 'KUBECONFIG')) | default(omit) }}"
+
+- name: Set fact for auth - API Key
+ ansible.builtin.set_fact:
+ api_key: "{{ sap_vm_provision_kubevirt_api_key | default(lookup('env', 'K8S_AUTH_API_KEY')) | default(omit) }}"
+ when: kubeconfig is defined
+
+- name: Set fact for auth - Username and Passwords
+ ansible.builtin.set_fact:
+ username: "{{ sap_vm_provision_kubevirt_username | default(lookup('env', 'K8S_AUTH_USERNAME')) | default(omit) }}"
+    password: "{{ sap_vm_provision_kubevirt_password | default(lookup('env', 'K8S_AUTH_PASSWORD')) | default(omit) }}" # assumes 'sap_vm_provision_kubevirt_password', following the naming of the other kubevirt auth variables
+
+# - name: Set fact for auth - Alternative
+# ansible.builtin.set_fact:
+# ca_cert: "{{ default(lookup('env', 'K8S_AUTH_SSL_CA_CERT')) | default(omit) }}"
+# client_cert: "{{ default(lookup('env', 'K8S_AUTH_CERT_FILE')) | default(omit) }}"
+# client_key: "{{ default(lookup('env', 'K8S_AUTH_KEY_FILE')) | default(omit) }}"
+# context: "{{ default(lookup('env', 'K8S_AUTH_CONTEXT')) | default(omit) }}"
+
+- name: Provision hosts to KubeVirt
+ register: register_provisioned_hosts
+ ansible.builtin.include_tasks:
+ file: "{{ 'platform_' + sap_vm_provision_iac_type }}/{{ sap_vm_provision_iac_platform }}/execute_provision.yml"
+
+- name: Add hosts provisioned to the Ansible Inventory
+ register: register_add_hosts
+ ansible.builtin.add_host:
+ name: "{{ add_item[0].host_node }}"
+    groups: "{{ add_item[0].sap_system_type + '_' if (add_item[0].sap_system_type != '') else '' }}{{ add_item[0].sap_host_type }}"
+ ansible_host: "{{ add_item[0].reported_devices[0].ips[0].address }}"
+ ansible_user: "root"
+ ansible_ssh_private_key_file: "{{ sap_vm_provision_ssh_host_private_key_file_path }}"
+ ansible_ssh_common_args: -o ConnectTimeout=180 -o ControlMaster=auto -o ControlPersist=3600s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no
+ loop: "{{ ansible_play_hosts | map('extract', hostvars, 'register_provisioned_host_all') }}"
+ loop_control:
+ label: "{{ add_item[0].host_node }}"
+ loop_var: add_item
+
+
+# Cannot override any variables from extravars input, see https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#understanding-variable-precedence
+# Ensure no default value exists for any prompted variable before execution of Ansible Playbook
+
+- name: Set fact to hold all inventory hosts in all groups
+ ansible.builtin.set_fact:
+ groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] ] | flatten | select() }}"
+
+- name: Set Ansible Vars
+ register: register_set_ansible_vars
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars.yml
+
+ # - ansible.builtin.debug:
+ # var: register_add_hosts.results
+
+- name: Ansible Task block to execute on target inventory hosts
+ delegate_to: "{{ inventory_hostname }}"
+ block:
+
+ # Required to collect the remote host's facts for further processing
+ # in the following steps
+ - name: Gather host facts
+ ansible.builtin.setup:
+
+  # Must be set to the short hostname,
+  # so that the commands 'hostname' and 'hostname -s' both return the short hostname only;
+  # otherwise SAP SWPM may fail with a hostname such as name.domain.com.domain.com
+ - name: Change system hostname (must be set to short name and not FQDN, as required by SAP)
+ ansible.builtin.hostname:
+ name: "{{ inventory_hostname_short }}"
+
+ - name: Set /etc/hosts
+ register: register_etc_hosts_file
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts.yml
+
+ - name: Set /etc/hosts for HA
+ register: register_etc_hosts_file_ha
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_ha.yml
+ when:
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+
+ - name: Set /etc/hosts for Scale-Out
+ register: register_etc_hosts_file_scaleout
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_scaleout.yml
+ when:
+ - (groups["hana_primary"] is defined and (groups["hana_primary"] | length>0)) and (sap_hana_scaleout_active_coordinator is defined or sap_hana_scaleout_active_worker is defined or sap_hana_scaleout_standby is defined)
+
+ - name: Set vars for sap_storage_setup Ansible Role
+ register: register_ansible_vars_storage
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars_storage.yml
+
+ - name: Register Package Repositories
+ ansible.builtin.include_tasks:
+ file: common/register_os.yml
+
+ - name: Register Web Forward Proxy
+ ansible.builtin.include_tasks:
+ file: common/register_proxy.yml
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/kubevirt_vm/execute_provision.yml b/roles/sap_vm_provision/tasks/platform_ansible/kubevirt_vm/execute_provision.yml
new file mode 100644
index 0000000..7b10c3b
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/kubevirt_vm/execute_provision.yml
@@ -0,0 +1,270 @@
+---
+# The tasks in this file are executed in a loop over the defined hosts
+
+# When SAP HANA Scale-Out is used, if the host name is not in the original specifications then strip the trailing node number from the host name
+- name: Set fact when performing SAP HANA Scale-Out
+ ansible.builtin.set_fact:
+ scaleout_origin_host_spec: "{{ inventory_hostname | regex_replace('^(.+?)\\d*$', '\\1') }}"
+ when:
+ - sap_hana_scaleout_active_coordinator is defined
+ - not inventory_hostname in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan].keys()
+
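+# Illustrative: an inventory_hostname of 'h01hana2' yields scaleout_origin_host_spec 'h01hana',
+# which is then used as the lookup key into the host specifications dictionary.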
+
+- name: Set fact for downloaded OS Image
+ ansible.builtin.set_fact:
+ os_image_downloaded: |-
+ {%- set disks_map = [
+ {
+        'metadata': { 'name': ((inventory_hostname + '-boot') | replace('_', '-')) },
+ 'spec' : {
+ 'source' : {
+ 'registry' : {
+ 'url': sap_vm_provision_kubevirt_vm_host_os_image_url,
+ 'pullMethod': 'node'
+ },
+ },
+ 'storage' : {
+ 'accessModes': ['ReadWriteOnce'],
+ 'resources': {
+ 'requests': {
+ 'storage': '50Gi'
+ }
+ }
+ }
+ }
+ }
+ ] -%}
+ {{ disks_map }}
+
+# - name: Set fact for existing OS Image
+# ansible.builtin.set_fact:
+# os_image_existing: |
+# {%- set disks_map = [
+# {
+# 'metadata': { 'name': (inventory_hostname + '-boot' | replace('_', '-')) },
+# 'spec' : {
+# 'source' : {
+# 'pvc' : {
+# 'name': (inventory_hostname + '-boot' | replace('_', '-')),
+# 'namespace': sap_vm_provision_kubevirt_target_namespace
+# },
+# },
+# 'storage' : {
+# 'accessModes': ['ReadWriteOnce'],
+# 'resources': {
+# 'requests': {
+# 'storage': '25Gi'
+# }
+# }
+# }
+# }
+# }
+# ] -%}
+# {{ disks_map }}
+
+
+- name: Set fact for storage volume template map
+ ansible.builtin.set_fact:
+ storage_disks_map: |-
+ {% set disks_map = [] -%}
+ {% for storage_item in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].storage_definition -%}
+ {% for idx in range(0, storage_item.disk_count | default(1)) -%}
+ {% if (storage_item.filesystem_type is defined) -%}
+ {% if ('swap' in storage_item.filesystem_type and storage_item.swap_path is not defined)
+ or ('swap' not in storage_item.filesystem_type and storage_item.nfs_path is not defined) -%}
+ {% set vol = disks_map.extend([
+ {
+          'metadata': { 'name': ((inventory_hostname + '-' + storage_item.name + (idx | string)) | replace('_', '-')) },
+ 'spec' : {
+ 'source' : {
+ 'blank' : {}
+ },
+ 'storage' : {
+ 'accessModes': ['ReadWriteMany'],
+ 'resources': {
+ 'requests': {
+ 'storage': ((storage_item.disk_size | default(0)) | string) + 'Gi'
+ }
+ },
+ 'storageClassName': storage_item.disk_type | default('')
+ }
+ }
+ }]) %}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- endfor %}
+ {{ disks_map }}
+
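+# Illustrative DataVolume template produced above (hypothetical host 'hana01' with a
+# 'hana_data' volume of 384 GiB and a hypothetical storageClassName 'ocs-storagecluster-ceph-rbd'):
+#   {'metadata': {'name': 'hana01-hana-data0'},
+#    'spec': {'source': {'blank': {}},
+#             'storage': {'accessModes': ['ReadWriteMany'],
+#                         'resources': {'requests': {'storage': '384Gi'}},
+#                         'storageClassName': 'ocs-storagecluster-ceph-rbd'}}}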
+
+- name: Set fact for storage volumes attachment list
+ ansible.builtin.set_fact:
+ storage_disk_name_list: |-
+ {% set disks_simple_map = [] -%}
+ {% for list_item in os_image_downloaded -%}
+ {% set vol = disks_simple_map.extend([
+ {
+ 'name': list_item.metadata.name,
+ 'dataVolume': { 'name': list_item.metadata.name },
+ }
+ ]) %}
+ {%- endfor %}
+ {% for list_item in storage_disks_map -%}
+ {% set vol = disks_simple_map.extend([
+ {
+ 'name': list_item.metadata.name,
+ 'dataVolume': { 'name': list_item.metadata.name },
+ }
+ ]) %}
+ {%- endfor %}
+ {{ disks_simple_map }}
+
+- name: Set fact for cloud-init volume
+ ansible.builtin.set_fact:
+ cloud_init_volume:
+ - name: cloudinit
+ cloudInitNoCloud:
+ userData: |-
+ #cloud-config
+ hostname: "{{ inventory_hostname_short }}"
+          {{ 'user: ' + sap_vm_provision_kubevirt_os_user if sap_vm_provision_kubevirt_os_user is defined else '' }}
+          {{ 'password: ' + sap_vm_provision_kubevirt_os_user_password if sap_vm_provision_kubevirt_os_user_password is defined else '' }}
+ chpasswd:
+ expire: false
+ ssh_authorized_keys:
+ - "{{ lookup('ansible.builtin.file', sap_vm_provision_ssh_host_public_key_file_path ) }}"
+ network:
+ version: 2
+ ethernets:
+ eth0:
+ dhcp4: true
+
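+# Illustrative userData render (hypothetical values, with both optional vars defined):
+#   #cloud-config
+#   hostname: "hana01"
+#   user: cloud-user
+#   password: examplepassword
+#   chpasswd:
+#     expire: false
+#   ssh_authorized_keys:
+#     - ssh-rsa AAAA...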
+
+- name: Provision KubeVirt Virtual Machine
+ kubevirt.core.kubevirt_vm:
+ api_version: "{{ api_version | default(omit) }}"
+ validate_certs: "{{ validate_certs | default(omit) }}"
+ persist_config: "{{ persist_config | default(omit) }}"
+ host: "{{ host | default(omit) }}" # Target Hypervisor Node
+
+ kubeconfig: "{{ kubeconfig | default(omit) }}"
+ api_key: "{{ api_key | default(omit) }}"
+ username: "{{ username | default(omit) }}"
+ password: "{{ password | default(omit) }}"
+
+ # ca_cert: "{{ ca_cert | default(omit) }}"
+ # client_cert: "{{ client_cert | default(omit) }}"
+ # client_key: "{{ client_key | default(omit) }}"
+ # context: "{{ context | default(omit) }}"
+
+ ## Virtual Machine target Hypervisor definition
+ namespace: "{{ sap_vm_provision_kubevirt_target_namespace }}" # Target namespace
+
+ ## Virtual Machine definition
+ state: present
+ running: true
+ wait: true # ensure Virtual Machine in ready state before exiting Ansible Task
+ wait_sleep: 30 # 30 second poll for ready state
+ wait_timeout: 600 # 10 minute wait for ready state
+ force: false # Do not replace existing Virtual Machine with same name
+ name: "{{ inventory_hostname }}"
+ labels:
+ app: "{{ inventory_hostname }}"
+
+ # Virtual Disk volume definitions
+    data_volume_templates: "{{ os_image_downloaded + storage_disks_map }}" # boot image DataVolume plus the blank data volumes referenced in 'volumes' below
+
+ # Virtual Machine configuration
+ #preference:
+ # name: fedora # OS Image, not used when data volume template and spec contains volume using registry OS Image
+ #instancetype:
+ # name: u1.medium # VM Template Size, not used when spec contains cpu and memory configuration
+ spec:
+ domain:
+ ioThreadsPolicy: auto
+      hostname: "{{ inventory_hostname_short }}"
+ evictionStrategy: LiveMigrate
+ terminationGracePeriodSeconds: 1800 # 30 minutes after stop request before VM is force terminated
+
+ resources:
+ requests:
+ memory: "{{ (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].kubevirt_vm_memory_gib) + 16 }}Gi" # Static 16GB DRAM overhead for container runtime
+
+ devices: {}
+
+ cpu:
+        cores: "{{ ((lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].kubevirt_vm_cpu_threads) / kubevirt_vm_cpu_smt) | int }}"
+ threads: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].kubevirt_vm_cpu_threads }}"
+ dedicatedCpuPlacement: true
+ isolateEmulatorThread: true
+ model: host-passthrough
+ numa:
+ guestMappingPassthrough: {}
+ features:
+ - name: x2apic
+ policy: require
+ - name: rdtscp
+ policy: require
+ - name: invtsc
+ policy: require
+
+ memory:
+ guest: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].kubevirt_vm_memory_gib }}Gi"
+ hugepages:
+ pageSize: 1Gi
+
+ networks:
+ - name: bridge-network-definition
+ multus:
+ networkName: iface-bridge-sriov
+ - name: storage-network-definition
+ multus:
+ networkName: iface-storage-sriov
+ - name: multi-network-definition
+ multus:
+ networkName: iface-multi-sriov
+
+ volumes: "{{ storage_disk_name_list + cloud_init_volume }}"
+
+
+- name: Check VM status
+ register: register_provisioned_host_single_info
+ kubevirt.core.kubevirt_vm_info:
+ name: "{{ inventory_hostname }}"
+ namespace: "{{ sap_vm_provision_kubevirt_target_namespace }}"
+
+
+- name: Create fact for delegate host IP
+ ansible.builtin.set_fact:
+ provisioned_private_ip: "{{ register_provisioned_host_single_info.spec.UNKNOWN_VARIABLE_FOR_PRIVATE_IP_HERE }}"
+
+
+- name: Collect only facts about hardware
+ register: host_disks_info
+ ansible.builtin.setup:
+ gather_subset:
+ - hardware
+ remote_user: root
+ become: true
+ become_user: root
+ delegate_to: "{{ provisioned_private_ip }}"
+ delegate_facts: false
+ vars:
+ ansible_ssh_private_key_file: "{{ sap_vm_provision_ssh_host_private_key_file_path }}"
+
+#- name: Output disks
+# ansible.builtin.debug:
+# var: hostvars[inventory_hostname].ansible_devices.keys() | list
+
+#- name: Debug Ansible Facts devices used list
+# ansible.builtin.debug:
+# msg: "{{ host_disks_info.ansible_facts.ansible_device_links.ids.keys() | list }}"
+
+
+- name: Append loop value to register
+ ansible.builtin.set_fact:
+    register_provisioned_host_single: "{{ register_provisioned_host_single_info.spec.UNKNOWN_VARIABLE_HERE | combine( { 'host_node' : inventory_hostname } , { 'sap_host_type' : lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_host_type } , { 'sap_system_type' : (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_system_type | default('')) } ) }}"
+
+- name: Append output to merged register
+ ansible.builtin.set_fact:
+ register_provisioned_host_all: "{{ register_provisioned_host_all + [register_provisioned_host_single] }}"
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/kubevirt_vm/post_deployment_execute.yml b/roles/sap_vm_provision/tasks/platform_ansible/kubevirt_vm/post_deployment_execute.yml
new file mode 100644
index 0000000..19c7341
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/kubevirt_vm/post_deployment_execute.yml
@@ -0,0 +1,5 @@
+---
+
+- name: Post Deployment notification
+ ansible.builtin.debug:
+ msg: "There are no Post Deployment tasks for SAP on this Infrastructure Platform"
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/execute_main.yml
new file mode 100644
index 0000000..fd6bca6
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/execute_main.yml
@@ -0,0 +1,167 @@
+---
+
+- name: Ansible Task block for looped provisioning of MS Azure VMs
+ environment:
+ ANSIBLE_AZURE_AUTH_SOURCE: "env"
+ AZURE_SUBSCRIPTION_ID: "{{ sap_vm_provision_msazure_subscription_id }}"
+ AZURE_TENANT: "{{ sap_vm_provision_msazure_tenant_id }}"
+ AZURE_CLIENT_ID: "{{ sap_vm_provision_msazure_app_client_id }}"
+ AZURE_SECRET: "{{ sap_vm_provision_msazure_app_client_secret }}"
+ block:
+
+ - name: Set fact to hold loop variables from include_tasks
+ ansible.builtin.set_fact:
+ register_provisioned_host_all: []
+
+ - name: Provision hosts to MS Azure
+ register: register_provisioned_hosts
+ ansible.builtin.include_tasks:
+ file: "{{ 'platform_' + sap_vm_provision_iac_type }}/{{ sap_vm_provision_iac_platform }}/execute_provision.yml"
+ apply:
+ environment:
+ ANSIBLE_AZURE_AUTH_SOURCE: "env"
+ AZURE_SUBSCRIPTION_ID: "{{ sap_vm_provision_msazure_subscription_id }}"
+ AZURE_TENANT: "{{ sap_vm_provision_msazure_tenant_id }}"
+ AZURE_CLIENT_ID: "{{ sap_vm_provision_msazure_app_client_id }}"
+ AZURE_SECRET: "{{ sap_vm_provision_msazure_app_client_secret }}"
+
+ - name: Add hosts provisioned to the Ansible Inventory
+ register: register_add_hosts
+ ansible.builtin.add_host:
+ name: "{{ add_item[0].host_node }}"
+      groups: "{{ add_item[0].sap_system_type + '_' if (add_item[0].sap_system_type != '') else '' }}{{ add_item[0].sap_host_type }}"
+ ansible_host: "{{ add_item[0].ansible_facts.azure_vm.properties.networkProfile.networkInterfaces[0].properties.ipConfigurations[0].properties.privateIPAddress }}"
+ ansible_user: "root"
+ ansible_ssh_private_key_file: "{{ sap_vm_provision_ssh_host_private_key_file_path }}"
+ ansible_ssh_common_args: -o ConnectTimeout=180 -o ControlMaster=auto -o ControlPersist=3600s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no -o ProxyCommand='ssh -W %h:%p {{ sap_vm_provision_bastion_user }}@{{ sap_vm_provision_bastion_public_ip }} -p {{ sap_vm_provision_bastion_ssh_port }} -i {{ sap_vm_provision_ssh_bastion_private_key_file_path }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
+ loop: "{{ ansible_play_hosts | map('extract', hostvars, 'register_provisioned_host_all') }}"
+ loop_control:
+ label: "{{ add_item[0].host_node }}"
+ loop_var: add_item
+
+
+# Cannot override any variables from extravars input, see https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#understanding-variable-precedence
+# Ensure no default value exists for any prompted variable before execution of Ansible Playbook
+
+ - name: Set fact to hold all inventory hosts in all groups
+ ansible.builtin.set_fact:
+ groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] ] | flatten | select() }}"
+
+ - name: Set Ansible Vars
+ register: register_set_ansible_vars
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars.yml
+
+ # Create "A" (IPv4 Address) Resource Record to map IPv4 address as hostname / subdomain of the root domain name
+ - name: Ansible MS Azure Private DNS Records for hosts
+ azure.azcollection.azure_rm_privatednsrecordset:
+ resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}"
+ zone_name: "{{ hostvars[inventory_hostname].sap_vm_provision_dns_root_domain }}"
+ relative_name: "{{ inventory_hostname }}"
+ record_type: A
+ records:
+ - entry: "{{ hostvars[inventory_hostname].ansible_host }}"
+
+ # - ansible.builtin.debug:
+ # var: register_add_hosts.results
+
+- name: Ansible Task block to execute on target inventory hosts
+ delegate_to: "{{ inventory_hostname }}"
+ block:
+
+ # Required to collect the remote host's facts for further processing
+ # in the following steps
+ - name: Gather host facts
+ ansible.builtin.setup:
+
+  # Must be set to the short hostname,
+  # so that the commands 'hostname' and 'hostname -s' both return the short hostname only;
+  # otherwise SAP SWPM may fail with a hostname such as name.domain.com.domain.com
+ - name: Change system hostname (must be set to short name and not FQDN, as required by SAP)
+ ansible.builtin.hostname:
+ name: "{{ inventory_hostname_short }}"
+
+ - name: Set /etc/hosts
+ register: register_etc_hosts_file
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts.yml
+
+ - name: Set /etc/hosts for HA
+ register: register_etc_hosts_file_ha
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_ha.yml
+ when:
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+
+ - name: Set /etc/hosts for Scale-Out
+ register: register_etc_hosts_file_scaleout
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_scaleout.yml
+ when:
+ - (groups["hana_primary"] is defined and (groups["hana_primary"] | length>0)) and (sap_hana_scaleout_active_coordinator is defined or sap_hana_scaleout_active_worker is defined or sap_hana_scaleout_standby is defined)
+
+ - name: Set vars for sap_storage_setup Ansible Role
+ register: register_ansible_vars_storage
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars_storage.yml
+
+
+- name: Ansible Task block to execute on target inventory hosts - High Availability
+ delegate_to: "{{ inventory_hostname }}"
+ when:
+ - sap_ha_pacemaker_cluster_msazure_resource_group is defined
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+ block:
+
+ # Do not enable TCP timestamps on Azure VMs placed behind Azure Load Balancer.
+ # Enabling TCP timestamps will cause the health probes to fail.
+ # Set parameter net.ipv4.tcp_timestamps to 0. For details see Load Balancer health probes:
+ # https://learn.microsoft.com/en-us/azure/load-balancer/load-balancer-custom-probe-overview
+ - name: Adjust system tcp_timestamps
+ ansible.posix.sysctl:
+ name: net.ipv4.tcp_timestamps
+ value: "0"
+ state: present
+
+ - name: Stop firewalld on all hosts before setup of Azure Load Balancer
+ ansible.builtin.systemd:
+ name: firewalld
+ state: stopped
+ enabled: false
+
+ # Ensure Primary Active Network Interface is used for Linux Pacemaker configuration (e.g. eth0), see documentation for Accelerated Networking
+ - name: Identify Primary Active Network Interface
+ register: __msazure_primary_active_vnic
+ ansible.builtin.shell: |
+      set -o pipefail && ip route show default 0.0.0.0/0 | awk '/default/ {print $5}'
+    args:
+      executable: /bin/bash
+
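+  # Illustrative: 'ip route show default 0.0.0.0/0' prints e.g.
+  # 'default via 10.0.0.1 dev eth0 proto dhcp metric 100', so awk field 5 yields 'eth0'.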
+ - name: Set facts on each host - Primary Active Network Interface for HA/DR
+ ansible.builtin.set_fact:
+ sap_ha_pacemaker_cluster_vip_client_interface: "{{ __msazure_primary_active_vnic.stdout }}"
+ when: __msazure_primary_active_vnic is defined
+
+
+- name: Ansible Task block for provisioning of High Availability resources for MS Azure VMs
+ delegate_to: localhost
+ run_once: true
+ environment:
+ ANSIBLE_AZURE_AUTH_SOURCE: "env"
+ AZURE_SUBSCRIPTION_ID: "{{ sap_vm_provision_msazure_subscription_id }}"
+ AZURE_TENANT: "{{ sap_vm_provision_msazure_tenant_id }}"
+ AZURE_CLIENT_ID: "{{ sap_vm_provision_msazure_app_client_id }}"
+ AZURE_SECRET: "{{ sap_vm_provision_msazure_app_client_secret }}"
+ when:
+ - sap_ha_pacemaker_cluster_msazure_resource_group is defined
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+ block:
+
+ - name: Provision High Availability resources for MS Azure VM hosts
+ ansible.builtin.include_tasks:
+ file: "{{ 'platform_' + sap_vm_provision_iac_type }}/{{ sap_vm_provision_iac_platform }}/execute_setup_ha.yml"
+ apply:
+ environment:
+ ANSIBLE_AZURE_AUTH_SOURCE: "env"
+ AZURE_SUBSCRIPTION_ID: "{{ sap_vm_provision_msazure_subscription_id }}"
+ AZURE_TENANT: "{{ sap_vm_provision_msazure_tenant_id }}"
+ AZURE_CLIENT_ID: "{{ sap_vm_provision_msazure_app_client_id }}"
+ AZURE_SECRET: "{{ sap_vm_provision_msazure_app_client_secret }}"
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/execute_provision.yml b/roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/execute_provision.yml
new file mode 100644
index 0000000..a1df0ee
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/execute_provision.yml
@@ -0,0 +1,258 @@
+---
+# The tasks in this file are executed in a loop over the defined hosts
+
+# When SAP HANA Scale-Out is used, if the host name is not in the original specifications then strip the trailing node number from the host name
+- name: Set fact when performing SAP HANA Scale-Out
+ ansible.builtin.set_fact:
+ scaleout_origin_host_spec: "{{ inventory_hostname | regex_replace('^(.+?)\\d*$', '\\1') }}"
+ when:
+ - sap_hana_scaleout_active_coordinator is defined
+ - not inventory_hostname in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan].keys()
+
+- name: Verify if network interface for MS Azure VM already exists (i.e. re-run)
+ register: register_provisioned_vnic_info
+ azure.azcollection.azure_rm_networkinterface_info:
+ resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}"
+ name: "{{ inventory_hostname }}-nic"
+
+- name: Provision network interface for MS Azure VM
+ register: register_provisioned_vnic
+ azure.azcollection.azure_rm_networkinterface:
+ resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}"
+ location: "{{ sap_vm_provision_msazure_location_region }}"
+ name: "{{ inventory_hostname }}-nic"
+ virtual_network: "{{ sap_vm_provision_msazure_vnet_name }}"
+ subnet_name: "{{ sap_vm_provision_msazure_vnet_subnet_name }}"
+ create_with_security_group: false
+ ip_configurations:
+ - name: "{{ inventory_hostname }}-nic-ipconfig"
+ primary: true
+ #private_ip_allocation_method: "Static" # When static, must define the specific IP Address
+ enable_accelerated_networking: true
+    enable_ip_forwarding: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].disable_ip_anti_spoofing }}" # When Anti IP Spoofing is disabled (true), IP Forwarding is enabled (true)
+ when: not (register_provisioned_vnic_info.networkinterfaces | length) > 0
+
+- name: Provision MS Azure VM
+ register: register_provisioned_host_single
+ azure.azcollection.azure_rm_virtualmachine:
+ resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}"
+ location: "{{ sap_vm_provision_msazure_location_region }}"
+ name: "{{ inventory_hostname }}"
+ admin_username: "azureadmin"
+ ssh_password_enabled: false
+ ssh_public_keys:
+ - path: /home/azureadmin/.ssh/authorized_keys
+ key_data: "{{ lookup('ansible.builtin.file', sap_vm_provision_ssh_host_public_key_file_path ) }}" # Replace with import/lookup via Ansible Module azure_rm_ssh_public_key/azure_rm_sshpublickey_info
+ vm_size: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].virtual_machine_profile }}"
+ image:
+ publisher: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_os_image_dictionary')[sap_vm_provision_msazure_vm_host_os_image].publisher }}"
+ offer: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_os_image_dictionary')[sap_vm_provision_msazure_vm_host_os_image].offer }}"
+ sku: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_os_image_dictionary')[sap_vm_provision_msazure_vm_host_os_image].sku }}"
+ version: latest
+ network_interfaces: "{{ inventory_hostname }}-nic"
+ public_ip_allocation_method: "Disabled"
+ managed_disk_type: StandardSSD_LRS
+ remove_on_absent: ["all"]
+ vm_identity: "SystemAssigned"
+ state: "present"
+ started: true
+
+
+- name: Read MS Azure VM information
+ register: api_host_info
+ azure.azcollection.azure_rm_virtualmachine_info:
+ resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}"
+ name: "{{ inventory_hostname }}"
+
+- name: Read MS Azure VM attached disks information
+ register: api_host_disks_info
+ azure.azcollection.azure_rm_manageddisk_info:
+ managed_by: "{{ api_host_info.vms[0].id }}"
+
+
+- name: Create fact for delegate host IP
+ ansible.builtin.set_fact:
+ provisioned_private_ip: "{{ register_provisioned_host_single.ansible_facts.azure_vm.properties.networkProfile.networkInterfaces[0].properties.ipConfigurations[0].properties.privateIPAddress }}"
+
+
+- name: Copy facts to delegate host
+ delegate_to: "{{ provisioned_private_ip }}"
+ delegate_facts: true
+ ansible.builtin.set_fact:
+ delegate_sap_vm_provision_bastion_user: "{{ sap_vm_provision_bastion_user }}"
+ delegate_sap_vm_provision_bastion_public_ip: "{{ sap_vm_provision_bastion_public_ip }}"
+ delegate_sap_vm_provision_bastion_ssh_port: "{{ sap_vm_provision_bastion_ssh_port }}"
+ delegate_sap_vm_provision_ssh_bastion_private_key_file_path: "{{ sap_vm_provision_ssh_bastion_private_key_file_path }}"
+ delegate_sap_vm_provision_ssh_host_private_key_file_path: "{{ sap_vm_provision_ssh_host_private_key_file_path }}"
+
+- name: Collect only facts about hardware
+ register: host_disks_info
+ ansible.builtin.setup:
+ gather_subset:
+ - hardware
+ remote_user: azureadmin
+ become: true
+ become_user: root
+ delegate_to: "{{ provisioned_private_ip }}"
+ delegate_facts: true
+ vars:
+ ansible_ssh_private_key_file: "{{ delegate_sap_vm_provision_ssh_host_private_key_file_path }}"
+ ansible_ssh_common_args: -o ConnectTimeout=180 -o ControlMaster=auto -o ControlPersist=3600s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no -o ProxyCommand='ssh -W %h:%p {{ delegate_sap_vm_provision_bastion_user }}@{{ delegate_sap_vm_provision_bastion_public_ip }} -p {{ delegate_sap_vm_provision_bastion_ssh_port }} -i {{ delegate_sap_vm_provision_ssh_bastion_private_key_file_path }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
+
+#- name: Output disks
+# ansible.builtin.debug:
+# var: hostvars[inventory_hostname].ansible_devices.keys() | list
+
+#- name: Debug Ansible Facts devices used list
+# ansible.builtin.debug:
+# msg: "{{ host_disks_info.ansible_facts.ansible_device_links.ids.keys() | list }}"
+
+
+- name: Set fact for available storage volume device names
+  ansible.builtin.set_fact:
+    available_volumes: |-
+      {% set letters = 'bcdefghijklmnopqrstuvwxyz' %}
+      {% set ansible_facts_devices_used_list = host_disks_info.ansible_facts.ansible_device_links.ids.keys() | list %}
+      {% set volumes = [] %}
+      {%- for letter in letters -%}
+        {% if 'sd' + letter not in ansible_facts_devices_used_list -%}
+          {% set dev = volumes.append('/dev/sd' + letter) %}
+        {%- endif %}
+      {%- endfor %}
+      {{ volumes | list | unique }}
+
+#- name: Debug available_volumes
+# ansible.builtin.debug:
+# msg: "{{ available_volumes }}"
+
+# Combines only the filesystem volume information from lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')
+# for volume device assignment.
+# This task assigns a device name to each volume to be created.
+- name: Set fact for target device map
+ ansible.builtin.set_fact:
+ filesystem_volume_map: |
+ {% set volume_map = [] -%}
+ {% set av_vol = available_volumes -%}
+ {% for storage_item in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].storage_definition -%}
+ {% for idx in range(0, storage_item.disk_count | default(1)) -%}
+ {% if (storage_item.filesystem_type is defined) -%}
+ {% if ('swap' in storage_item.filesystem_type and storage_item.swap_path is not defined)
+ or ('swap' not in storage_item.filesystem_type and storage_item.nfs_path is not defined) -%}
+ {% set vol = volume_map.extend([
+ {
+ 'definition_key': storage_item.name,
+ 'device': av_vol[0],
+ 'fstype': storage_item.filesystem_type | default('xfs'),
+ 'name': storage_item.name + idx|string,
+ 'size': storage_item.disk_size | default(0),
+ 'type': storage_item.disk_type | default('')
+ }
+ ]) %}
+ {%- set _ = av_vol.pop(0) -%}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- endfor %}
+ {{ volume_map }}
+
+#- name: Debug filesystem_volume_map
+# ansible.builtin.debug:
+# msg: "{{ filesystem_volume_map }}"
+
+
+# The volume creation task requires the above task to define the parameter
+# which contains the calculated unique device names.
+- name: Provision Azure Managed Disk volumes for Azure VM filesystems
+ azure.azcollection.azure_rm_manageddisk:
+ resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}"
+ location: "{{ sap_vm_provision_msazure_location_region }}"
+ name: "{{ inventory_hostname }}-vol_{{ vol_item.name }}"
+ disk_size_gb: "{{ vol_item.size }}"
+ managed_by_extended:
+ - name: "{{ inventory_hostname }}"
+ resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}"
+ # Premium SSD size (P), Standard SSD size (E), Standard HDD size (S)
+ storage_account_type: "{% if vol_item.type | regex_search('^P.*') %}Premium_LRS{% elif vol_item.type | regex_search('^E.*') %}StandardSSD_LRS{% elif vol_item.type | regex_search('^S.*') %}Standard_LRS{% else %}StandardSSD_LRS{% endif %}" # Standard_LRS, StandardSSD_LRS, Premium_LRS, UltraSSD_LRS
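+    # Illustrative mapping from the expression above: vol_item.type 'P20' -> Premium_LRS,
+    # 'E10' -> StandardSSD_LRS, 'S30' -> Standard_LRS, unset/other -> StandardSSD_LRS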
+ loop: "{{ filesystem_volume_map }}"
+ loop_control:
+ loop_var: vol_item
+ index_var: vol_item_index
+ label: "{{ vol_item.definition_key }}: {{ vol_item.name }} (size: {{ vol_item.size }})"
+ when:
+ - vol_item.fstype is defined
+ - vol_item.size > 0
+ register: volume_provisioning
+ failed_when: "(volume_provisioning.msg is defined) and ('already exists' not in volume_provisioning.msg)"
+
+
+- name: Add host facts
+ ansible.builtin.set_fact:
+ filesystem_volume_map: "{{ filesystem_volume_map }}"
+ volume_provisioning: "{{ volume_provisioning }}"
+ delegate_to: "{{ inventory_hostname }}"
+ delegate_facts: true
+
+- name: Create fact for delegate host IP
+ ansible.builtin.set_fact:
+ provisioned_private_ip: "{{ register_provisioned_host_single.ansible_facts.azure_vm.properties.networkProfile.networkInterfaces[0].properties.ipConfigurations[0].properties.privateIPAddress }}"
+
+- name: Copy facts to delegate host
+ delegate_to: "{{ provisioned_private_ip }}"
+ delegate_facts: true
+ ansible.builtin.set_fact:
+ delegate_sap_vm_provision_bastion_user: "{{ sap_vm_provision_bastion_user }}"
+ delegate_sap_vm_provision_bastion_public_ip: "{{ sap_vm_provision_bastion_public_ip }}"
+ delegate_sap_vm_provision_bastion_ssh_port: "{{ sap_vm_provision_bastion_ssh_port }}"
+ delegate_sap_vm_provision_ssh_bastion_private_key_file_path: "{{ sap_vm_provision_ssh_bastion_private_key_file_path }}"
+ delegate_sap_vm_provision_ssh_host_private_key_file_path: "{{ sap_vm_provision_ssh_host_private_key_file_path }}"
+ delegate_private_ip: "{{ register_provisioned_host_single.ansible_facts.azure_vm.properties.networkProfile.networkInterfaces[0].properties.ipConfigurations[0].properties.privateIPAddress }}"
+ delegate_hostname: "{{ inventory_hostname }}"
+ delegate_sap_vm_provision_dns_root_domain_name: "{{ sap_vm_provision_dns_root_domain }}"
+ delegate_sap_vm_provision_ssh_host_public_key_file_path: "{{ lookup('ansible.builtin.file', sap_vm_provision_ssh_host_public_key_file_path ) }}" # Replace with import/lookup via Ansible Module azure_rm_ssh_public_key/azure_rm_sshpublickey_info
+
+
+### begin block, parameters will be applied to each task within the block
+- name: Allow login from root OS User
+ remote_user: azureadmin
+ become: true
+ become_user: root
+ delegate_to: "{{ provisioned_private_ip }}"
+ delegate_facts: true
+ vars:
+ ansible_ssh_private_key_file: "{{ delegate_sap_vm_provision_ssh_host_private_key_file_path }}"
+ ansible_ssh_common_args: -o ConnectTimeout=180 -o ControlMaster=auto -o ControlPersist=3600s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no -o ProxyCommand='ssh -W %h:%p {{ delegate_sap_vm_provision_bastion_user }}@{{ delegate_sap_vm_provision_bastion_public_ip }} -p {{ delegate_sap_vm_provision_bastion_ssh_port }} -i {{ delegate_sap_vm_provision_ssh_bastion_private_key_file_path }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
+ block:
+
+ - name: Fix root authorized_keys entries
+ ansible.builtin.replace:
+ path: /root/.ssh/authorized_keys
+ backup: true
+ regexp: '(^.*ssh-rsa)'
+ replace: 'ssh-rsa'
+
+ - name: Permit root login
+ ansible.builtin.replace:
+ path: /etc/ssh/sshd_config
+ regexp: '(^PermitRootLogin no)'
+ replace: 'PermitRootLogin yes'
+ register: sshd_config
+
+ - name: Reload sshd service
+ ansible.builtin.service:
+ name: sshd
+ state: reloaded
+ when:
+ - sshd_config.changed
+
+### end of block
+
+
+- name: Append loop value to register
+ ansible.builtin.set_fact:
+ register_provisioned_host_single: "{{ register_provisioned_host_single | combine( { 'host_node' : inventory_hostname } , { 'sap_host_type' : lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_host_type } , { 'sap_system_type' : (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_system_type | default('')) } ) }}"
+
+- name: Append output to merged register
+ ansible.builtin.set_fact:
+ register_provisioned_host_all: "{{ register_provisioned_host_all + [register_provisioned_host_single] }}"
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/execute_setup_ha.yml b/roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/execute_setup_ha.yml
new file mode 100644
index 0000000..bc48e5f
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/execute_setup_ha.yml
@@ -0,0 +1,562 @@
+---
+
+# - name: Gather information about MS Azure Route Table for the VNet Subnet
+# azure.azcollection.azure_rm_routetable_info:
+# resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}"
+# name: "name-here"
+# register: msazure_vnet_subnet_rt_info
+
+# - name: Ansible MS Azure Route Table append route for SAP HANA HA
+# azure.azcollection.azure_rm_route:
+# resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}"
+# route_table_name: "{{ msazure_vnet_subnet_rt_info.route_tables[0].id }}"
+# name: "{{ sap_swpm_db_host }}-rt"
+# address_prefix: "{{ sap_ha_pacemaker_cluster_vip_hana_primary_ip_address | default('192.168.1.90/32') }}"
+# next_hop_type: "virtual_appliance"
+# next_hop_ip_address: "{{ hostvars[host_node].ansible_host }}"
+# loop: "{{ (groups['hana_primary'] | default([])) }}"
+# loop_control:
+# loop_var: host_node
+# register: msazure_vnet_subnet_rt_route_sap_hana
+# when:
+# - groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)
+
+- name: Ansible MS Azure Private DNS Records for SAP HANA HA Virtual Hostname
+ azure.azcollection.azure_rm_privatednsrecordset:
+ resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}"
+ zone_name: "{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}"
+ relative_name: "{{ sap_swpm_db_host }}"
+ record_type: A
+ records:
+ - entry: "{{ (sap_ha_pacemaker_cluster_vip_hana_primary_ip_address | default('192.168.1.90/32')) | regex_replace('/.*', '') }}"
+ loop: "{{ (groups['hana_primary'] | default([])) }}"
+ loop_control:
+ loop_var: host_node
+ when:
+ - groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)
+
+
+# - name: Ansible MS Azure Route Table append route for SAP NetWeaver ASCS HA
+# azure.azcollection.azure_rm_route:
+# resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}"
+# route_table_name: "{{ msazure_vnet_subnet_rt_info.route_tables[0].id }}"
+# name: "{{ sap_swpm_ascs_instance_hostname }}-rt"
+# address_prefix: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | default('192.168.2.10/32') }}"
+# next_hop_type: "virtual_appliance"
+# next_hop_ip_address: "{{ hostvars[host_node].ansible_host }}"
+# loop: "{{ (groups['nwas_ascs'] | default([])) }}"
+# loop_control:
+# loop_var: host_node
+# register: msazure_vnet_subnet_rt_route_sap_netweaver_ascs
+# when:
+# - groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)
+
+- name: Ansible MS Azure Private DNS Records for SAP NetWeaver ASCS HA Virtual Hostname
+ azure.azcollection.azure_rm_privatednsrecordset:
+ resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}"
+ zone_name: "{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}"
+ relative_name: "{{ sap_swpm_ascs_instance_hostname }}"
+ record_type: A
+ records:
+ - entry: "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | default('192.168.2.10/32')) | regex_replace('/.*', '') }}"
+ loop: "{{ (groups['nwas_ascs'] | default([])) }}"
+ loop_control:
+ loop_var: host_node
+ when:
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+
+
+# - name: Ansible MS Azure Route Table append route for SAP NetWeaver ERS HA
+# azure.azcollection.azure_rm_route:
+# resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}"
+# route_table_name: "{{ msazure_vnet_subnet_rt_info.route_tables[0].id }}"
+# name: "{{ sap_swpm_ers_instance_hostname }}-rt"
+# address_prefix: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | default('192.168.2.11/32') }}"
+# next_hop_type: "virtual_appliance"
+# next_hop_ip_address: "{{ hostvars[host_node].ansible_host }}"
+# loop: "{{ (groups['nwas_ers'] | default([])) }}"
+# loop_control:
+# loop_var: host_node
+# register: msazure_vnet_subnet_rt_route_sap_netweaver_ers
+# when:
+# - groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)
+
+- name: Ansible MS Azure Private DNS Records for SAP NetWeaver ERS HA Virtual Hostname
+ azure.azcollection.azure_rm_privatednsrecordset:
+ resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}"
+ zone_name: "{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}"
+ relative_name: "{{ sap_swpm_ers_instance_hostname }}"
+ record_type: A
+ records:
+ - entry: "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | default('192.168.2.11/32')) | regex_replace('/.*', '') }}"
+ loop: "{{ (groups['nwas_ers'] | default([])) }}"
+ loop_control:
+ loop_var: host_node
+ when:
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+
+
+## For HA of PAS and AAS, if required
+
+# - name: Ansible MS Azure Route Table append route for SAP NetWeaver PAS HA
+# azure.azcollection.azure_rm_route:
+# resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}"
+# route_table_name: "{{ msazure_vnet_subnet_rt_info.route_tables[0].id }}"
+# name: "{{ sap_swpm_pas_instance_hostname }}-rt"
+# address_prefix: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_pas_ip_address | default('192.168.2.12/32') }}"
+# next_hop_type: "virtual_appliance"
+# next_hop_ip_address: "{{ hostvars[host_node].ansible_host }}"
+# loop: "{{ (groups['nwas_pas'] | default([])) }}"
+# loop_control:
+# loop_var: host_node
+# register: msazure_vnet_subnet_rt_route_sap_netweaver_pas
+# when:
+# - groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)
+
+# - name: Ansible MS Azure Private DNS Records for SAP NetWeaver PAS HA Virtual Hostname
+# azure.azcollection.azure_rm_privatednsrecordset:
+# resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}"
+# zone_name: "{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}"
+# relative_name: "{{ sap_swpm_pas_instance_hostname }}"
+# record_type: A
+# records:
+# - entry: "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_pas_ip_address | default('192.168.2.12/32')) | regex_replace('/.*', '') }}"
+# loop: "{{ (groups['nwas_pas'] | default([])) }}"
+# loop_control:
+# loop_var: host_node
+# when:
+# - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+
+
+# - name: Ansible MS Azure Route Table append route for SAP NetWeaver AAS HA
+# azure.azcollection.azure_rm_route:
+# resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}"
+# route_table_name: "{{ msazure_vnet_subnet_rt_info.route_tables[0].id }}"
+# name: "{{ sap_swpm_aas_instance_hostname }}-rt"
+# address_prefix: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_aas_ip_address | default('192.168.2.13/32') }}"
+# next_hop_type: "virtual_appliance"
+# next_hop_ip_address: "{{ hostvars[host_node].ansible_host }}"
+# loop: "{{ (groups['nwas_aas'] | default([])) }}"
+# loop_control:
+# loop_var: host_node
+# register: msazure_vnet_subnet_rt_route_sap_netweaver_aas
+# when:
+# - groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)
+
+# - name: Ansible MS Azure Private DNS Records for SAP NetWeaver AAS HA Virtual Hostname
+# azure.azcollection.azure_rm_privatednsrecordset:
+# resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}"
+# zone_name: "{{ hostvars[host_node].sap_vm_provision_dns_root_domain }}"
+# relative_name: "{{ sap_swpm_aas_instance_hostname }}"
+# record_type: A
+# records:
+# - entry: "{{ (sap_ha_pacemaker_cluster_vip_nwas_abap_aas_ip_address | default('192.168.2.13/32')) | regex_replace('/.*', '') }}"
+# loop: "{{ (groups['nwas_aas'] | default([])) }}"
+# loop_control:
+# loop_var: host_node
+# when:
+# - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+
+
+- name: MS Azure IAM Role - Definition
+ azure.azcollection.azure_rm_roledefinition:
+ name: "Linux Fence Agent Role"
+ description: "Allows to power-off and start virtual machines"
+ #scope: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myresourceGroup
+ assignable_scopes:
+ - "/subscriptions/{{ sap_vm_provision_msazure_subscription_id }}"
+ permissions:
+ - actions:
+ - "Microsoft.Compute/*/read"
+ - "Microsoft.Compute/virtualMachines/powerOff/action"
+ - "Microsoft.Compute/virtualMachines/start/action"
+ # - data_actions:
+ # - not_actions:
+ # - not_data_actions:
+ register: msazure_iam_role_fencing
+
+- name: MS Azure - GenericRestClient call to Virtual Machine API to identify Managed Service Identity (MSI)
+ azure.azcollection.azure_rm_resource_info:
+ resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}"
+ provider: Compute
+ resource_type: virtualMachines
+ resource_name: "{{ host_node }}"
+ register: msazure_vm_info_collect
+ loop: "{{ groups_merged_list }}"
+ loop_control:
+ loop_var: host_node
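+# The resource JSON returned for each Virtual Machine exposes 'identity.principalId' only when a
+# System-Assigned Managed Identity is enabled on the Virtual Machine; this Object ID is consumed
+# by the IAM Role Assignment below.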
+
+# Assign to the MSI Object ID (Service Principal ID)
+- name: MS Azure IAM Role - Assignment to MS Azure Managed Service Identity (MSI)
+ azure.azcollection.azure_rm_roleassignment:
+ #auth_source: msi
+ role_definition_id:
+ "{{ msazure_iam_role_fencing.id }}"
+ scope: "/subscriptions/{{ sap_vm_provision_msazure_subscription_id }}"
+ assignee_object_id: "{{ host_node.response[0].identity.principalId | default(none) }}"
+ loop: "{{ msazure_vm_info_collect.results }}"
+ loop_control:
+ loop_var: host_node
+ label: "{{ host_node.response[0].name | default(none) }}" # Use default to avoid "Failed to template 'dict object' has no attribute 'response'"
+
+
+# The Azure Load Balancer Health Check Probe defines the destination port that the Load Balancer connects to on each Virtual Machine to evaluate the Virtual Machine's health status.
+# Ensure the Virtual Machine is also listening on this port (that is, the port is open).
+# https://learn.microsoft.com/en-us/azure/load-balancer/manage-probes-how-to
+#
+# As the Linux Pacemaker 'azure-lb' Resource Agent is not yet started when the Load Balancer Rule is created, a fake health check response must be used instead;
+# otherwise the Azure Load Balancer Rule will evaluate all Virtual Machines in the Azure Load Balancer Backend Pool as 'probed DOWN'.
+#
+# Note: ICMP ping requests to the Azure Load Balancer Frontend IP (i.e. Virtual IP) are answered by the Azure Load Balancer itself;
+# the ICMP ping requests are not forwarded to the Virtual Machines in the Azure Load Balancer Backend Pool.
+# https://learn.microsoft.com/en-us/azure/load-balancer/load-balancer-test-frontend-reachability#usage-considerations
+#
+# The azure_rm_loadbalancer Ansible Module does not allow backend pool population - https://github.com/ansible-collections/azure/issues/866
+# One of two workarounds must be used:
+# Opt 1. Provision the Load Balancer first, then assign each vNIC at creation to the Load Balancer backend pool. This failed to register healthy VMs.
+# Opt 2. Use the azure_rm_networkinterface Ansible Module to update each vNIC in-place and assign it to a Load Balancer backend pool.
+# Option selected = Opt 2
+#
+# In addition, because the Azure Load Balancer is provisioned before Linux Pacemaker, a temporary Health Check Probe port with an active OS service listening on it is required.
+# The Health Check Probe therefore uses Port 55550/55551/55552 during the initial installation, and is switched to the correct port number after the SAP installation and Linux Pacemaker setup.
+# This is because an Azure Load Balancer Rule triggers the Health Check Probe as soon as any host is added to the Backend Pool;
+# if a host is not listening on the probe port, the probe fails, the host is marked unhealthy and traffic is not routed to it, and if all hosts fail then the Frontend IP will also fail / not respond to ICMP ping requests.
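+#
+# A minimal sketch of such a temporary listener (hypothetical, this task file does not create it): any OS service
+# answering TCP connections on the temporary probe port of each backend host is sufficient, for example a
+# socket-activated systemd unit or a simple netcat loop such as:
+#   nohup nc -l -k 55550 >/dev/null 2>&1 &
+# Once Linux Pacemaker is configured, the 'azure-lb' Resource Agent provides the listener on the final probe port.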
+
+
+- name: Ansible Task block for provisioning of Load Balancer for High Availability
+ delegate_to: localhost
+ run_once: true
+ environment:
+ ANSIBLE_AZURE_AUTH_SOURCE: "env"
+ AZURE_SUBSCRIPTION_ID: "{{ sap_vm_provision_msazure_subscription_id }}"
+ AZURE_TENANT: "{{ sap_vm_provision_msazure_tenant_id }}"
+ AZURE_CLIENT_ID: "{{ sap_vm_provision_msazure_app_client_id }}"
+ AZURE_SECRET: "{{ sap_vm_provision_msazure_app_client_secret }}"
+ when:
+ - sap_ha_pacemaker_cluster_msazure_resource_group is defined
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+ block:
+
+ - name: Gather MS Azure Subnet ID
+ azure.azcollection.azure_rm_subnet_info:
+ resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}"
+ virtual_network_name: "{{ sap_vm_provision_msazure_vnet_name }}"
+ name: "{{ sap_vm_provision_msazure_vnet_subnet_name }}"
+ register: msazure_vnet_subnet_info
+
+
+ - name: Define Ansible Variables for Azure Load Balancer - VIP for SAP HANA
+ ansible.builtin.set_fact:
+ lb_frontend_virtual_ips1: "{{ lb_frontend_virtual_ips1 | default([]) + [__ip_element] }}"
+ vars:
+ __ip_element:
+ name: "lb-vip-hana{{ vip_index_nr }}"
+ private_ip_address: "{{ vip_item | regex_replace('/.*', '') }}"
+ private_ip_allocation_method: "Static"
+ subnet: "{{ msazure_vnet_subnet_info.subnets[0].id }}"
+ zones: ["1", "2", "3"] # Zone-redundant
+ when:
+ - vip_item | length > 0
+ - groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)
+ loop:
+ - "{{ sap_ha_pacemaker_cluster_vip_hana_primary_ip_address | default() }}"
+ loop_control:
+ index_var: vip_index_nr
+ loop_var: vip_item
+
+ - name: Define Ansible Variables for Azure Load Balancer - VIP for SAP AnyDB
+ ansible.builtin.set_fact:
+ lb_frontend_virtual_ips1: "{{ lb_frontend_virtual_ips1 | default([]) + [__ip_element] }}"
+ vars:
+ __ip_element:
+ name: "lb-vip-anydb{{ vip_index_nr }}"
+ private_ip_address: "{{ vip_item | regex_replace('/.*', '') }}"
+ private_ip_allocation_method: "Static"
+ subnet: "{{ msazure_vnet_subnet_info.subnets[0].id }}"
+ zones: ["1", "2", "3"] # Zone-redundant
+ when:
+ - vip_item | length > 0
+ - groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0)
+ loop:
+ - "{{ sap_vm_temp_vip_anydb_primary | default() }}"
+ loop_control:
+ index_var: vip_index_nr
+ loop_var: vip_item
+
+ - name: Define Ansible Variables for Azure Load Balancer - VIP for SAP NetWeaver ASCS/ERS
+ ansible.builtin.set_fact:
+ lb_frontend_virtual_ips2: "{{ lb_frontend_virtual_ips2 | default([]) + [__ip_element] }}"
+ vars:
+ __ip_element:
+ name: "lb-vip-nwas{{ vip_index_nr }}"
+ private_ip_address: "{{ vip_item | regex_replace('/.*', '') }}"
+ private_ip_allocation_method: "Static"
+ subnet: "{{ msazure_vnet_subnet_info.subnets[0].id }}"
+ zones: ["1", "2", "3"] # Zone-redundant
+ when:
+ - vip_item | length > 0
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+ loop:
+ - "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | default() }}"
+ - "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | default() }}"
+ loop_control:
+ index_var: vip_index_nr
+ loop_var: vip_item
+
+
+ - name: Define Ansible Variables for Azure Load Balancer - VIP Health Check for SAP HANA
+ ansible.builtin.set_fact:
+ lb_probes1: "{{ lb_probes1 | default([]) + [__probe_element] }}"
+ vars:
+ __probe_element:
+ name: "lb-probe-hc-vip-hana{{ sapinstance_index_nr }}"
+ protocol: Tcp
+ port: "55550" # "{{ ('626' + sapinstance_item | string) | int }}"
+ interval: 5
+ fail_count: 2
+ when:
+ - sapinstance_item | length > 0
+ - groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)
+ loop:
+ - "{{ sap_system_hana_db_instance_nr | default() }}"
+ loop_control:
+ index_var: sapinstance_index_nr
+ loop_var: sapinstance_item
+
+ - name: Define Ansible Variables for Azure Load Balancer - VIP Health Check for SAP AnyDB
+ ansible.builtin.set_fact:
+ lb_probes1: "{{ lb_probes1 | default([]) + [__probe_element] }}"
+ vars:
+ __probe_element:
+ name: "lb-probe-hc-vip-anydb"
+ protocol: Tcp
+ port: "55550" # 62700
+ interval: 5
+ fail_count: 2
+ when:
+ - groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0)
+
+ - name: Define Ansible Variables for Azure Load Balancer - VIP Health Check for SAP NetWeaver ASCS/ERS
+ ansible.builtin.set_fact:
+ lb_probes2: "{{ lb_probes2 | default([]) + [__probe_element] }}"
+ vars:
+ __probe_element:
+ name: "lb-probe-hc-vip-nwas{{ sapinstance_index_nr }}"
+ protocol: Tcp
+ port: "{{ ('5555' + (sapinstance_index_nr + 1)) | string | int }}" # "{{ ('626' + sapinstance_item | string) | int }}"
+ interval: 5
+ fail_count: 2
+ when:
+ - sapinstance_item | length > 0
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+ loop:
+ - "{{ sap_system_nwas_abap_ascs_instance_nr | default() }}"
+ - "{{ sap_system_nwas_abap_ers_instance_nr | default() }}"
+ loop_control:
+ index_var: sapinstance_index_nr
+ loop_var: sapinstance_item
+
+
+ - name: Define Ansible Variables for Azure Load Balancer - LB Rule for SAP HANA
+ ansible.builtin.set_fact:
+ lb_rules1: "{{ lb_rules1 | default([]) + [__rule_element] }}"
+ vars:
+ __rule_element:
+ name: "lb-rule-hana{{ rule_index_nr }}"
+ frontend_ip_configuration: "lb-vip-hana{{ rule_index_nr }}"
+ backend_address_pool: lb-backend-pool-hana
+ protocol: All
+ frontend_port: 0 # High Availability Ports (AnyPort), only on Internal Standard Load Balancer
+ backend_port: 0 # High Availability Ports (AnyPort), only on Internal Standard Load Balancer
+ probe: "lb-probe-hc-vip-hana{{ rule_index_nr }}"
+ load_distribution: Default # Session persistence = None
+ idle_timeout: 30 # 30 minutes
+ enable_floating_ip: true # enable Frontend IP as a Floating IP (aka. Direct Server Return), if disabled then only 1 LB Rule allowed
+ when:
+ - rule_item | length > 0
+ - groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)
+ loop:
+ - "{{ sap_ha_pacemaker_cluster_vip_hana_primary_ip_address | default() }}"
+ loop_control:
+ index_var: rule_index_nr
+ loop_var: rule_item
+
+ - name: Define Ansible Variables for Azure Load Balancer - LB Rule for SAP AnyDB
+ ansible.builtin.set_fact:
+ lb_rules1: "{{ lb_rules1 | default([]) + [__rule_element] }}"
+ vars:
+ __rule_element:
+ name: "lb-rule-anydb{{ rule_index_nr }}"
+ frontend_ip_configuration: "lb-vip-anydb{{ rule_index_nr }}"
+ backend_address_pool: lb-backend-pool-anydb
+ protocol: All
+ frontend_port: 0 # High Availability Ports (AnyPort), only on Internal Standard Load Balancer
+ backend_port: 0 # High Availability Ports (AnyPort), only on Internal Standard Load Balancer
+ probe: "lb-probe-hc-vip-anydb"
+ load_distribution: Default # Session persistence = None
+ idle_timeout: 30 # 30 minutes
+ enable_floating_ip: true # enable Frontend IP as a Floating IP (aka. Direct Server Return), if disabled then only 1 LB Rule allowed
+ when:
+ - rule_item | length > 0
+ - groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0)
+ loop:
+ - "{{ sap_vm_temp_vip_anydb_primary | default() }}"
+ loop_control:
+ index_var: rule_index_nr
+ loop_var: rule_item
+
+ - name: Define Ansible Variables for Azure Load Balancer - LB Rule for SAP NetWeaver ASCS/ERS
+ ansible.builtin.set_fact:
+ lb_rules2: "{{ lb_rules2 | default([]) + [__rule_element] }}"
+ vars:
+ __rule_element:
+ name: "lb-rule-nwas{{ rule_index_nr }}"
+ frontend_ip_configuration: "lb-vip-nwas{{ rule_index_nr }}"
+ backend_address_pool: lb-backend-pool-nwas-ascs
+ protocol: All
+ frontend_port: 0 # High Availability Ports (AnyPort), only on Internal Standard Load Balancer
+ backend_port: 0 # High Availability Ports (AnyPort), only on Internal Standard Load Balancer
+ probe: "lb-probe-hc-vip-nwas{{ rule_index_nr }}"
+ load_distribution: Default # Session persistence = None
+ idle_timeout: 30 # 30 minutes
+ enable_floating_ip: true # enable Frontend IP as a Floating IP (aka. Direct Server Return), if disabled then only 1 LB Rule allowed
+ when:
+ - rule_item | length > 0
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+ loop:
+ - "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | default() }}"
+ - "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | default() }}"
+ loop_control:
+ index_var: rule_index_nr
+ loop_var: rule_item
+
+ - name: MS Azure Load Balancer (network L4) - Create NLB for SAP HANA with Virtual IP and Health Probe configuration
+ azure.azcollection.azure_rm_loadbalancer:
+ resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}"
+ name: "lb-sap-hana-ha" # "lb-sap-ha"
+ sku: "Standard" # AnyPort (HA Port) Protocol rule is not allowed for basic SKU load balancer, use standard SKU load balancer instead
+ frontend_ip_configurations: "{{ (lb_frontend_virtual_ips1 | default([])) }}" # "{{ (lb_frontend_virtual_ips1 | default([])) + (lb_frontend_virtual_ips2 | default([])) }}"
+ backend_address_pools:
+ - name: lb-backend-pool-hana
+ probes: "{{ (lb_probes1 | default([])) }}" # "{{ (lb_probes1 | default([])) + (lb_probes2 | default([])) }}"
+ load_balancing_rules: "{{ (lb_rules1 | default([])) }}" # "{{ (lb_rules1 | default([])) + (lb_rules2 | default([])) }}"
+ when: (groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0))
+ register: msazure_lb1a_info
+
+ - name: MS Azure Load Balancer (network L4) - Create NLB for SAP AnyDB with Virtual IP and Health Probe configuration
+ azure.azcollection.azure_rm_loadbalancer:
+ resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}"
+ name: "lb-sap-anydb-ha" # "lb-sap-ha"
+ sku: "Standard" # AnyPort (HA Port) Protocol rule is not allowed for basic SKU load balancer, use standard SKU load balancer instead
+ frontend_ip_configurations: "{{ (lb_frontend_virtual_ips1 | default([])) }}" # "{{ (lb_frontend_virtual_ips1 | default([])) + (lb_frontend_virtual_ips2 | default([])) }}"
+ backend_address_pools:
+ - name: lb-backend-pool-anydb
+ probes: "{{ (lb_probes1 | default([])) }}" # "{{ (lb_probes1 | default([])) + (lb_probes2 | default([])) }}"
+ load_balancing_rules: "{{ (lb_rules1 | default([])) }}" # "{{ (lb_rules1 | default([])) + (lb_rules2 | default([])) }}"
+ when: (groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0))
+ register: msazure_lb1b_info
+
+ - name: MS Azure Load Balancer (network L4) - Define Ansible Variable of Load Balancer for Database Server
+ ansible.builtin.set_fact:
+ msazure_lb1_info: "{{ msazure_lb1a_info if (groups['hana_secondary'] is defined and (groups['hana_secondary']|length>0)) else msazure_lb1b_info if (groups['anydb_secondary'] is defined and (groups['anydb_secondary']|length>0)) }}"
+ when: (groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0))
+
+ - name: MS Azure Load Balancer (network L4) - Create NLB for SAP NetWeaver with Virtual IP and Health Probe configuration
+ azure.azcollection.azure_rm_loadbalancer:
+ resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}"
+ name: "lb-sap-nwas-ha"
+ sku: "Standard" # AnyPort (HA Port) Protocol rule is not allowed for basic SKU load balancer, use standard SKU load balancer instead
+ frontend_ip_configurations: "{{ (lb_frontend_virtual_ips2 | default([])) }}"
+ backend_address_pools:
+ - name: lb-backend-pool-nwas-ascs
+ probes: "{{ (lb_probes2 | default([])) }}"
+ load_balancing_rules: "{{ (lb_rules2 | default([])) }}"
+ when: (groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0))
+ register: msazure_lb2_info
+
+ - name: Set fact to hold loop variables from include_tasks when SAP HANA HA
+ ansible.builtin.set_fact:
+ lb_ha_sap_hana: "{{ msazure_lb1_info.state.backend_address_pools | selectattr('name', '==', 'lb-backend-pool-hana') | map(attribute='id') | first }}"
+ when: (groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0))
+
+ - name: Set fact to hold loop variables from include_tasks when SAP AnyDB HA
+ ansible.builtin.set_fact:
+ lb_ha_sap_anydb: "{{ msazure_lb1_info.state.backend_address_pools | selectattr('name', '==', 'lb-backend-pool-anydb') | map(attribute='id') | first }}"
+ when: (groups["anyb_secondary"] is defined and (groups["anydb_secondary"]|length>0))
+
+ - name: Set fact to hold loop variables from include_tasks when SAP NetWeaver HA
+ ansible.builtin.set_fact:
+ lb_ha_sap_nwas: "{{ msazure_lb2_info.state.backend_address_pools | selectattr('name', '==', 'lb-backend-pool-nwas-ascs') | map(attribute='id') | first }}"
+ when: (groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0))
+
+ - name: Update network interfaces for MS Azure VM - for SAP HANA HA with load balancing
+ register: register_provisioned_vnic1
+ azure.azcollection.azure_rm_networkinterface:
+ resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}"
+ location: "{{ sap_vm_provision_msazure_location_region }}"
+ name: "{{ host_node }}-nic"
+ virtual_network: "{{ sap_vm_provision_msazure_vnet_name }}"
+ subnet_name: "{{ sap_vm_provision_msazure_vnet_subnet_name }}"
+ create_with_security_group: false
+ ip_configurations:
+ - name: "{{ host_node }}-nic-ipconfig"
+ primary: true
+ #private_ip_allocation_method: "Static" # When static, must define the specific IP Address
+ load_balancer_backend_address_pools:
+ - "{{ lb_ha_sap_hana }}"
+ enable_accelerated_networking: true
+ enable_ip_forwarding: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].disable_ip_anti_spoofing }}" # When Anti IP Spoofing is disabled (disable_ip_anti_spoofing = true), IP Forwarding must be enabled
+ loop: "{{ groups_merged_list }}"
+ loop_control:
+ loop_var: host_node
+ when:
+ - "'hana_' in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][host_node].sap_host_type" # REPLACE with substring in any of the strings contained in the list
+ - groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)
+
+ - name: Update network interfaces for MS Azure VM - for SAP AnyDB HA with load balancing
+ register: register_provisioned_vnic1
+ azure.azcollection.azure_rm_networkinterface:
+ resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}"
+ location: "{{ sap_vm_provision_msazure_location_region }}"
+ name: "{{ host_node }}-nic"
+ virtual_network: "{{ sap_vm_provision_msazure_vnet_name }}"
+ subnet_name: "{{ sap_vm_provision_msazure_vnet_subnet_name }}"
+ create_with_security_group: false
+ ip_configurations:
+ - name: "{{ host_node }}-nic-ipconfig"
+ primary: true
+ #private_ip_allocation_method: "Static" # When static, must define the specific IP Address
+ load_balancer_backend_address_pools:
+ - "{{ lb_ha_sap_anydb }}"
+ enable_accelerated_networking: true
+ enable_ip_forwarding: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].disable_ip_anti_spoofing }}" # When Anti IP Spoofing is disabled (disable_ip_anti_spoofing = true), IP Forwarding must be enabled
+ loop: "{{ groups_merged_list }}"
+ loop_control:
+ loop_var: host_node
+ when:
+ - "'anydb_' in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][host_node].sap_host_type" # REPLACE with substring in any of the strings contained in the list
+ - groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0)
+
+ - name: Update network interfaces for MS Azure VM - for SAP NetWeaver HA ASCS/ERS with load balancing
+ register: register_provisioned_vnic2
+ azure.azcollection.azure_rm_networkinterface:
+ resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}"
+ location: "{{ sap_vm_provision_msazure_location_region }}"
+ name: "{{ host_node }}-nic"
+ virtual_network: "{{ sap_vm_provision_msazure_vnet_name }}"
+ subnet_name: "{{ sap_vm_provision_msazure_vnet_subnet_name }}"
+ create_with_security_group: false
+ ip_configurations:
+ - name: "{{ host_node }}-nic-ipconfig"
+ primary: true
+ #private_ip_allocation_method: "Static" # When static, must define the specific IP Address
+ load_balancer_backend_address_pools:
+ - "{{ lb_ha_sap_nwas }}"
+ enable_accelerated_networking: true
+ enable_ip_forwarding: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].disable_ip_anti_spoofing }}" # When Anti IP Spoofing is disabled (disable_ip_anti_spoofing = true), IP Forwarding must be enabled
+ loop: "{{ groups_merged_list }}"
+ loop_control:
+ loop_var: host_node
+ when:
+ - "'nwas_ascs' in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][host_node].sap_host_type or 'nwas_ers' in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][host_node].sap_host_type" # REPLACE with substring in any of the strings contained in the list
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/post_deployment_execute.yml b/roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/post_deployment_execute.yml
new file mode 100644
index 0000000..e14f006
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/msazure_vm/post_deployment_execute.yml
@@ -0,0 +1,278 @@
+---
+
+- name: Ansible Task block for amending Load Balancer ports for High Availability - after provisioning MS Azure VM
+ delegate_to: localhost
+ run_once: true
+ environment:
+ ANSIBLE_AZURE_AUTH_SOURCE: "env"
+ AZURE_SUBSCRIPTION_ID: "{{ sap_vm_provision_msazure_subscription_id }}"
+ AZURE_TENANT: "{{ sap_vm_provision_msazure_tenant_id }}"
+ AZURE_CLIENT_ID: "{{ sap_vm_provision_msazure_app_client_id }}"
+ AZURE_SECRET: "{{ sap_vm_provision_msazure_app_client_secret }}"
+ when:
+ - sap_ha_pacemaker_cluster_msazure_resource_group is defined
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+ block:
+
+ - name: Inherit variable - set fact for Azure Load Balancer - VIP Health Check - SAP HANA
+ ansible.builtin.set_fact:
+ lb_frontend_virtual_ip_healthcheck_hana: "{{ sap_ha_pacemaker_cluster_healthcheck_hana_primary_port | default('') }}"
+ when: sap_ha_pacemaker_cluster_healthcheck_hana_primary_port is defined
+
+ - name: Inherit variable - set fact for Azure Load Balancer - VIP Health Check - SAP NWAS ASCS
+ ansible.builtin.set_fact:
+ lb_frontend_virtual_ip_healthcheck_nwas_ascs: "{{ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ascs_port | default('') }}"
+ when: sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ascs_port is defined
+
+ - name: Inherit variable - set fact for Azure Load Balancer - VIP Health Check - SAP NWAS ERS
+ ansible.builtin.set_fact:
+ lb_frontend_virtual_ip_healthcheck_nwas_ers: "{{ sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ers_port | default('') }}"
+ when: sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ers_port is defined
+
+ - name: Default variable - Set fact for Azure Load Balancer - VIP Health Check - SAP HANA
+ ansible.builtin.set_fact:
+ lb_frontend_virtual_ip_healthcheck_hana: "{{ ('620' + (sap_system_hana_db_instance_nr | default('')) | string) | int }}"
+ when: not sap_ha_pacemaker_cluster_healthcheck_hana_primary_port is defined
+
+ - name: Default variable - Set fact for Azure Load Balancer - VIP Health Check - SAP NWAS ASCS
+ ansible.builtin.set_fact:
+ lb_frontend_virtual_ip_healthcheck_nwas_ascs: "{{ ('620' + (sap_system_nwas_abap_ascs_instance_nr | default('')) | string) | int }}"
+ when: not sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ascs_port is defined
+
+ - name: Default variable - Set fact for Azure Load Balancer - VIP Health Check - SAP NWAS ERS
+ ansible.builtin.set_fact:
+ lb_frontend_virtual_ip_healthcheck_nwas_ers: "{{ ('620' + (sap_system_nwas_abap_ers_instance_nr | default('')) | string) | int }}"
+ when: not sap_ha_pacemaker_cluster_healthcheck_nwas_abap_ers_port is defined
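+ # The default health check ports follow the '620' + <SAP instance number> convention,
+ # e.g. SAP HANA instance number '90' yields health check port 62090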
+
+ - name: Gather MS Azure Subnet ID
+ azure.azcollection.azure_rm_subnet_info:
+ resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}"
+ virtual_network_name: "{{ sap_vm_provision_msazure_vnet_name }}"
+ name: "{{ sap_vm_provision_msazure_vnet_subnet_name }}"
+ register: msazure_vnet_subnet_info
+
+
+ - name: Define Ansible Variables for Azure Load Balancer - VIP for SAP HANA
+ ansible.builtin.set_fact:
+ lb_frontend_virtual_ips1: "{{ lb_frontend_virtual_ips1 | default([]) + [__ip_element] }}"
+ vars:
+ __ip_element:
+ name: "lb-vip-hana{{ vip_index_nr }}"
+ private_ip_address: "{{ vip_item | regex_replace('/.*', '') }}"
+ private_ip_allocation_method: "Static"
+ subnet: "{{ msazure_vnet_subnet_info.subnets[0].id }}"
+ zones: ["1", "2", "3"] # Zone-redundant
+ when:
+ - vip_item | length > 0
+ - groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)
+ loop:
+ - "{{ sap_ha_pacemaker_cluster_vip_hana_primary_ip_address | default() }}"
+ loop_control:
+ index_var: vip_index_nr
+ loop_var: vip_item
+
+ - name: Define Ansible Variables for Azure Load Balancer - VIP for SAP AnyDB
+ ansible.builtin.set_fact:
+ lb_frontend_virtual_ips1: "{{ lb_frontend_virtual_ips1 | default([]) + [__ip_element] }}"
+ vars:
+ __ip_element:
+ name: "lb-vip-anydb{{ vip_index_nr }}"
+ private_ip_address: "{{ vip_item | regex_replace('/.*', '') }}"
+ private_ip_allocation_method: "Static"
+ subnet: "{{ msazure_vnet_subnet_info.subnets[0].id }}"
+ zones: ["1", "2", "3"] # Zone-redundant
+ when:
+ - vip_item | length > 0
+ - groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0)
+ loop:
+ - "{{ sap_vm_temp_vip_anydb_primary | default() }}"
+ loop_control:
+ index_var: vip_index_nr
+ loop_var: vip_item
+
+ - name: Define Ansible Variables for Azure Load Balancer - VIP for SAP NetWeaver ASCS/ERS
+ ansible.builtin.set_fact:
+ lb_frontend_virtual_ips2: "{{ lb_frontend_virtual_ips2 | default([]) + [__ip_element] }}"
+ vars:
+ __ip_element:
+ name: "lb-vip-nwas{{ vip_index_nr }}"
+ private_ip_address: "{{ vip_item | regex_replace('/.*', '') }}"
+ private_ip_allocation_method: "Static"
+ subnet: "{{ msazure_vnet_subnet_info.subnets[0].id }}"
+ zones: ["1", "2", "3"] # Zone-redundant
+ when:
+ - vip_item | length > 0
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+ loop:
+ - "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | default() }}"
+ - "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | default() }}"
+ loop_control:
+ index_var: vip_index_nr
+ loop_var: vip_item
+
+
+ - name: Define Ansible Variables for Azure Load Balancer - VIP Health Check for SAP HANA
+ ansible.builtin.set_fact:
+ lb_probes1: "{{ lb_probes1 | default([]) + [__probe_element] }}"
+ vars:
+ __probe_element:
+ name: "lb-probe-hc-vip-hana{{ healthcheck_index_nr }}"
+ protocol: Tcp
+ port: "{{ healthcheck_item }}"
+ interval: 5
+ fail_count: 2
+ when:
+ - healthcheck_item | length > 0
+ - groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)
+ loop:
+ - "{{ lb_frontend_virtual_ip_healthcheck_hana | default() }}"
+ loop_control:
+ index_var: healthcheck_index_nr
+ loop_var: healthcheck_item
+
+ - name: Define Ansible Variables for Azure Load Balancer - VIP Health Check for SAP AnyDB
+ ansible.builtin.set_fact:
+ lb_probes1: "{{ lb_probes1 | default([]) + [__probe_element] }}"
+ vars:
+ __probe_element:
+ name: "lb-probe-hc-vip-anydb"
+ protocol: Tcp
+ port: "62700"
+ interval: 5
+ fail_count: 2
+ when:
+ - groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0)
+
+ - name: Define Ansible Variables for Azure Load Balancer - VIP Health Check for SAP NetWeaver ASCS/ERS
+ ansible.builtin.set_fact:
+ lb_probes2: "{{ lb_probes2 | default([]) + [__probe_element] }}"
+ vars:
+ __probe_element:
+ name: "lb-probe-hc-vip-nwas{{ healthcheck_index_nr }}"
+ protocol: Tcp
+ port: "{{ healthcheck_item }}"
+ interval: 5
+ fail_count: 2
+ when:
+ - healthcheck_item | length > 0
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+ loop:
+ - "{{ lb_frontend_virtual_ip_healthcheck_nwas_ascs | default() }}"
+ - "{{ lb_frontend_virtual_ip_healthcheck_nwas_ers | default() }}"
+ loop_control:
+ index_var: healthcheck_index_nr
+ loop_var: healthcheck_item
+
+
+ - name: Define Ansible Variables for Azure Load Balancer - LB Rule for SAP HANA
+ ansible.builtin.set_fact:
+ lb_rules1: "{{ lb_rules1 | default([]) + [__rule_element] }}"
+ vars:
+ __rule_element:
+ name: "lb-rule-hana{{ rule_index_nr }}"
+ frontend_ip_configuration: "lb-vip-hana{{ rule_index_nr }}"
+ backend_address_pool: lb-backend-pool-hana
+ protocol: All
+ frontend_port: 0 # High Availability Ports (AnyPort), only on Internal Standard Load Balancer
+ backend_port: 0 # High Availability Ports (AnyPort), only on Internal Standard Load Balancer
+ probe: "lb-probe-hc-vip-hana{{ rule_index_nr }}"
+ load_distribution: Default # Session persistence = None
+ idle_timeout: 30 # 30 minutes
+ enable_floating_ip: true # enable Frontend IP as a Floating IP (aka. Direct Server Return), if disabled then only 1 LB Rule allowed
+ when:
+ - rule_item | length > 0
+ - groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)
+ loop:
+ - "{{ sap_ha_pacemaker_cluster_vip_hana_primary_ip_address | default() }}"
+ loop_control:
+ index_var: rule_index_nr
+ loop_var: rule_item
+
+ - name: Define Ansible Variables for Azure Load Balancer - LB Rule for SAP AnyDB
+ ansible.builtin.set_fact:
+ lb_rules1: "{{ lb_rules1 | default([]) + [__rule_element] }}"
+ vars:
+ __rule_element:
+ name: "lb-rule-anydb{{ rule_index_nr }}"
+ frontend_ip_configuration: "lb-vip-anydb{{ rule_index_nr }}"
+ backend_address_pool: lb-backend-pool-anydb
+ protocol: All
+ frontend_port: 0 # High Availability Ports (AnyPort), only on Internal Standard Load Balancer
+ backend_port: 0 # High Availability Ports (AnyPort), only on Internal Standard Load Balancer
+ probe: "lb-probe-hc-vip-anydb"
+ load_distribution: Default # Session persistence = None
+ idle_timeout: 30 # 30 minutes
+ enable_floating_ip: true # enable Frontend IP as a Floating IP (aka. Direct Server Return), if disabled then only 1 LB Rule allowed
+ when:
+ - rule_item | length > 0
+ - groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0)
+ loop:
+ - "{{ sap_vm_temp_vip_anydb_primary | default() }}"
+ loop_control:
+ index_var: rule_index_nr
+ loop_var: rule_item
+
+ - name: Define Ansible Variables for Azure Load Balancer - LB Rule for SAP NetWeaver ASCS/ERS
+ ansible.builtin.set_fact:
+ lb_rules2: "{{ lb_rules2 | default([]) + [__rule_element] }}"
+ vars:
+ __rule_element:
+ name: "lb-rule-nwas{{ rule_index_nr }}"
+ frontend_ip_configuration: "lb-vip-nwas{{ rule_index_nr }}"
+ backend_address_pool: lb-backend-pool-nwas-ascs
+ protocol: All
+ frontend_port: 0 # High Availability Ports (AnyPort), only on Internal Standard Load Balancer
+ backend_port: 0 # High Availability Ports (AnyPort), only on Internal Standard Load Balancer
+ probe: "lb-probe-hc-vip-nwas{{ rule_index_nr }}"
+ load_distribution: Default # Session persistence = None
+ idle_timeout: 30 # 30 minutes
+ enable_floating_ip: true # enable Frontend IP as a Floating IP (aka. Direct Server Return), if disabled then only 1 LB Rule allowed
+ when:
+ - rule_item | length > 0
+ - groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0)
+ loop:
+ - "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | default() }}"
+ - "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | default() }}"
+ loop_control:
+ index_var: rule_index_nr
+ loop_var: rule_item
+
+
+ - name: MS Azure Load Balancer (network L4) - Update NLB for SAP HANA with Virtual IP and Health Probe configuration
+ azure.azcollection.azure_rm_loadbalancer:
+ resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}"
+ name: "lb-sap-hana-ha" # "lb-sap-ha"
+ sku: "Standard" # AnyPort (HA Port) Protocol rule is not allowed for basic SKU load balancer, use standard SKU load balancer instead
+ frontend_ip_configurations: "{{ (lb_frontend_virtual_ips1 | default([])) }}" # "{{ (lb_frontend_virtual_ips1 | default([])) + (lb_frontend_virtual_ips2 | default([])) }}"
+ backend_address_pools:
+ - name: lb-backend-pool-hana
+ probes: "{{ (lb_probes1 | default([])) }}" # "{{ (lb_probes1 | default([])) + (lb_probes2 | default([])) }}"
+ load_balancing_rules: "{{ (lb_rules1 | default([])) }}" # "{{ (lb_rules1 | default([])) + (lb_rules2 | default([])) }}"
+ when: (groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0))
+ register: msazure_lb1_info
+
+ - name: MS Azure Load Balancer (network L4) - Update NLB for SAP AnyDB with Virtual IP and Health Probe configuration
+ azure.azcollection.azure_rm_loadbalancer:
+ resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}"
+ name: "lb-sap-anydb-ha" # "lb-sap-ha"
+ sku: "Standard" # AnyPort (HA Port) Protocol rule is not allowed for basic SKU load balancer, use standard SKU load balancer instead
+ frontend_ip_configurations: "{{ (lb_frontend_virtual_ips1 | default([])) }}" # "{{ (lb_frontend_virtual_ips1 | default([])) + (lb_frontend_virtual_ips2 | default([])) }}"
+ backend_address_pools:
+ - name: lb-backend-pool-anydb
+ probes: "{{ (lb_probes1 | default([])) }}" # "{{ (lb_probes1 | default([])) + (lb_probes2 | default([])) }}"
+ load_balancing_rules: "{{ (lb_rules1 | default([])) }}" # "{{ (lb_rules1 | default([])) + (lb_rules2 | default([])) }}"
+ when: (groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0))
+ register: msazure_lb1_info
+
+ - name: MS Azure Load Balancer (network L4) - Update NLB for SAP NetWeaver with Virtual IP and Health Probe configuration
+ azure.azcollection.azure_rm_loadbalancer:
+ resource_group: "{{ sap_vm_provision_msazure_resource_group_name }}"
+ name: "lb-sap-nwas-ha"
+ sku: "Standard" # AnyPort (HA Port) Protocol rule is not allowed for basic SKU load balancer, use standard SKU load balancer instead
+ frontend_ip_configurations: "{{ (lb_frontend_virtual_ips2 | default([])) }}"
+ backend_address_pools:
+ - name: lb-backend-pool-nwas-ascs
+ probes: "{{ (lb_probes2 | default([])) }}"
+ load_balancing_rules: "{{ (lb_rules2 | default([])) }}"
+ when: (groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0))
+ register: msazure_lb2_info
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/ovirt_vm/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible/ovirt_vm/execute_main.yml
new file mode 100644
index 0000000..61a32eb
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/ovirt_vm/execute_main.yml
@@ -0,0 +1,101 @@
+---
+
+- name: OVirt Authentication
+ ovirt.ovirt.ovirt_auth:
+ insecure: "{{ sap_vm_provision_ovirt_engine_insecure_bool | default(true) }}"
+ url: "{{ sap_vm_provision_ovirt_engine_url | default(lookup('env', 'OVIRT_URL')) | default(omit) }}"
+ hostname: "{{ sap_vm_provision_ovirt_engine_fqdn | default(lookup('env', 'OVIRT_HOSTNAME')) | default(omit) }}"
+ username: "{{ sap_vm_provision_ovirt_engine_user | default(lookup('env', 'OVIRT_USERNAME')) | default(omit) }}"
+ password: "{{ sap_vm_provision_ovirt_engine_password | default(lookup('env', 'OVIRT_PASSWORD')) | default(omit) }}"
+ ca_file: "{{ sap_vm_provision_ovirt_engine_cafile | default(lookup('env', 'OVIRT_CAFILE')) | default(omit) }}"
+ when: ovirt_auth is undefined or not ovirt_auth
+ register: ovirt_session
+
+- name: Set fact to hold loop variables from include_tasks
+ ansible.builtin.set_fact:
+ register_provisioned_host_all: []
+
+- name: Provision hosts to OVirt
+ register: register_provisioned_hosts
+ ansible.builtin.include_tasks:
+ file: "{{ 'platform_' + sap_vm_provision_iac_type }}/{{ sap_vm_provision_iac_platform }}/execute_provision.yml"
+ vars:
+ ovirt_auth: "{{ sap_vm_provision_ovirt_auth }}"
+
+- name: Add hosts provisioned to the Ansible Inventory
+ register: register_add_hosts
+ ansible.builtin.add_host:
+ name: "{{ add_item[0].host_node }}"
+ groups: "{{ add_item[0].sap_system_type + '_' if (add_item[0].sap_system_type != '') }}{{ add_item[0].sap_host_type }}"
+ ansible_host: "{{ add_item[0].reported_devices[0].ips[0].address }}"
+ ansible_user: "root"
+ ansible_ssh_private_key_file: "{{ sap_vm_provision_ssh_host_private_key_file_path }}"
+ ansible_ssh_common_args: -o ConnectTimeout=180 -o ControlMaster=auto -o ControlPersist=3600s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no
+ loop: "{{ ansible_play_hosts | map('extract', hostvars, 'register_provisioned_host_all') }}"
+ loop_control:
+ label: "{{ add_item[0].host_node }}"
+ loop_var: add_item
+
+
+# Cannot override any variables from extravars input, see https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#understanding-variable-precedence
+# Ensure no default value exists for any prompted variable before execution of Ansible Playbook
+
+- name: Set fact to hold all inventory hosts in all groups
+ ansible.builtin.set_fact:
+ groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] ] | flatten | select() }}"
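+ # 'flatten' merges the nested per-group host lists; 'select()' without a test then drops the empty entries left by undefined groups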
+
+- name: Set Ansible Vars
+ register: register_set_ansible_vars
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars.yml
+
+ # - ansible.builtin.debug:
+ # var: register_add_hosts.results
+
+- name: Ansible Task block to execute on target inventory hosts
+ delegate_to: "{{ inventory_hostname }}"
+ block:
+
+ # Required to collect the remote host's facts for further processing
+ # in the following steps
+ - name: Gather host facts
+ ansible.builtin.setup:
+
+ # Must be set to the short hostname,
+ # so that both 'hostname' and 'hostname -s' return the short hostname only;
+ # otherwise SAP SWPM may fail with a doubled domain name such as name.domain.com.domain.com
+ - name: Change system hostname (must be set to short name and not FQDN, as required by SAP)
+ ansible.builtin.hostname:
+ name: "{{ inventory_hostname_short }}"
+
+ - name: Set /etc/hosts
+ register: register_etc_hosts_file
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts.yml
+
+ - name: Set /etc/hosts for HA
+ register: register_etc_hosts_file_ha
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_ha.yml
+ when:
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+
+ - name: Set /etc/hosts for Scale-Out
+ register: register_etc_hosts_file_scaleout
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_scaleout.yml
+ when:
+ - (groups["hana_primary"] is defined and (groups["hana_primary"] | length>0)) and (sap_hana_scaleout_active_coordinator is defined or sap_hana_scaleout_active_worker is defined or sap_hana_scaleout_standby is defined)
+
+ - name: Set vars for sap_storage_setup Ansible Role
+ register: register_ansible_vars_storage
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars_storage.yml
+
+ - name: Register Package Repositories
+ ansible.builtin.include_tasks:
+ file: common/register_os.yml
+
+ - name: Register Web Forward Proxy
+ ansible.builtin.include_tasks:
+ file: common/register_proxy.yml
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/ovirt_vm/execute_provision.yml b/roles/sap_vm_provision/tasks/platform_ansible/ovirt_vm/execute_provision.yml
new file mode 100644
index 0000000..af1db50
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/ovirt_vm/execute_provision.yml
@@ -0,0 +1,297 @@
+---
+# The tasks in this file are executed in a loop over the defined hosts
+
+# When SAP HANA Scale-Out is used and the host name is not in the original specifications, strip the trailing node number from the host name
+- name: Set fact when performing SAP HANA Scale-Out
+ ansible.builtin.set_fact:
+ scaleout_origin_host_spec: "{{ inventory_hostname | regex_replace('^(.+?)\\d*$', '\\1') }}"
+ when:
+ - sap_hana_scaleout_active_coordinator is defined
+ - not inventory_hostname in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan].keys()
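+# e.g. a Scale-Out inventory host named 'hana03' (hypothetical) resolves to the host specification key 'hana'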
+
+- name: Check if VM exists
+ register: register_check_vm_exists
+ ovirt.ovirt.ovirt_vm_info:
+ auth: "{{ ovirt_session.ansible_facts.ovirt_auth }}"
+ pattern: name={{ inventory_hostname }} and cluster={{ sap_vm_provision_ovirt_hypervisor_cluster_name }}
+
+
+# For later check if the provided OS name is actually available
+- name: Check available OS names in OVirt
+ ovirt.ovirt.ovirt_vm_os_info:
+ auth: "{{ ovirt_session.ansible_facts.ovirt_auth }}"
+ register: register_ovirt_available_os
+
+
+# VM creation block:
+# This block is run when the VM does not exist yet.
+#
+- name: Block that provisions the VM
+ when:
+ - register_check_vm_exists.ovirt_vms is defined
+ - register_check_vm_exists.ovirt_vms | length == 0
+ block:
+
+ - name: For Kickstart, provision Virtual Disk boot volume
+ ovirt.ovirt.ovirt_disk:
+ auth: "{{ ovirt_session.ansible_facts.ovirt_auth }}"
+ name: "{{ inventory_hostname }}-vol_os"
+ size: "{{ sap_vm_provision_ovirt_vm_kickstart_definition.boot_disk.size }}"
+ format: "{{ sap_vm_provision_ovirt_vm_kickstart_definition.boot_disk.format }}"
+ storage_domain: "{{ sap_vm_provision_ovirt_hypervisor_cluster_storage_domain_name }}" # Hypervisor Cluster's attached storage domain
+ wait: true
+ when:
+ - sap_vm_provision_ovirt_vm_kickstart_definition is defined
+ - sap_vm_provision_ovirt_vm_kickstart_definition | length > 0
+ - sap_vm_provision_ovirt_vm_template_name is not defined or
+ sap_vm_provision_ovirt_vm_template_name | length == 0
+ register: register_provisioned_boot_disk
+ until: register_provisioned_boot_disk.disk.status == 'ok'
+ retries: 600
+
+ - name: Merge disk provisioning result with disk attachment definition
+ ansible.builtin.set_fact:
+ merge_provisioned_boot_disk_fact: "{{ register_provisioned_boot_disk.disk | ansible.builtin.combine(sap_vm_provision_ovirt_vm_kickstart_definition.boot_disk) }}"
+ when:
+ - sap_vm_provision_ovirt_vm_kickstart_definition is defined
+ - sap_vm_provision_ovirt_vm_kickstart_definition | length > 0
+ - register_provisioned_boot_disk is defined
+
+ - name: Convert disk provisioning result to disk attachment list
+ ansible.builtin.set_fact:
+ provisioned_boot_disk_fact: "{{ provisioned_boot_disk_fact | default([]) + [merge_provisioned_boot_disk_fact] }}"
+ when:
+ - sap_vm_provision_ovirt_vm_kickstart_definition is defined
+ - sap_vm_provision_ovirt_vm_kickstart_definition | length > 0
+ - register_provisioned_boot_disk is defined
+
+
+ - name: Provision OVirt Virtual Machine
+ register: register_provisioned_host_single
+ ovirt.ovirt.ovirt_vm:
+ auth: "{{ ovirt_session.ansible_facts.ovirt_auth }}"
+
+ ## Virtual Machine target Hypervisor definition
+ cluster: "{{ sap_vm_provision_ovirt_hypervisor_cluster_name }}" # Hypervisor Cluster
+ host: "{{ sap_vm_provision_ovirt_hypervisor_cluster_host_node_name | default('') }}" # Hypervisor Cluster Node
+ storage_domain: "{{ sap_vm_provision_ovirt_hypervisor_cluster_storage_domain_name }}" # Hypervisor Cluster's attached storage domain
+
+ ## Virtual Machine definition
+ state: running
+ wait: true # wait until VM is running before Ansible Module is marked as completed
+ timeout: 1200 # seconds, wait 20 minutes for VM to provision
+ name: "{{ inventory_hostname }}"
+ description: "{{ inventory_hostname }} created by Ansible Playbook for SAP"
+ comment: "{{ inventory_hostname }} created by Ansible Playbook for SAP"
+
+ ## Virtual Machine main resources definition
+ cpu_sockets: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].ovirt_vm_cpu_threads }}"
+ # Size suffix uses IEC 60027-2 standard (for example 1GiB, 1024MiB)
+ memory: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].ovirt_vm_memory_gib }}GiB"
+ # Make sure guaranteed memory is defined to avoid error when calculated larger than set in VM template.
+ memory_guaranteed: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].ovirt_vm_memory_gib }}GiB"
+
+ ## Virtual Machine settings configuration
+ # Do not use Memory (RAM) ballooning, avoid over-commit of Memory
+ ballooning_enabled: false
+ boot_devices:
+ - hd
+ - network
+ boot_menu: "{{ sap_vm_provision_ovirt_vm_boot_menu }}"
+ usb_support: false
+ soundcard_enabled: false
+ high_availability: false
+ operating_system: "{{ sap_vm_provision_ovirt_vm_operating_system }}"
+ placement_policy: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].ovirt_vm_placement_policy | default('pinned') }}"
+ stateless: false
+ timezone: "{{ sap_vm_provision_ovirt_vm_timezone }}"
+ # Virtual Machine Type: high_performance, server, desktop
+ type: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].ovirt_vm_type | default('high_performance') }}"
+
+ ## Virtual Machine Storage configuration
+ disk_format: "{{ sap_vm_provision_ovirt_vm_disk_type }}" # RHV default is 'cow' = thin provisioning
+ disks: "{{ provisioned_boot_disk_fact | default([]) }}" # If using VM Template, leave blank (list)
+
+ ## Virtual Machine Network configuration - virtio vNICs
+ nics: "{{ sap_vm_provision_ovirt_vm_nics | default([]) }}" # Optional definition of NIC devices list
+
+ ## Option 1: Virtual Machine clone from VM Template
+ template: "{{ sap_vm_provision_ovirt_vm_template_name }}" # VM Template name stored on Hypervisor Cluster
+ #template_version: "" # default is the latest available version of the template
+ #use_latest_template_version: true # if true, forces a stateless VM; kept here as a reminder
+ #allow_partial_import: false
+ clone: "{{ sap_vm_provision_ovirt_vm_clone_independent }}" # VM will be cloned as (in)dependent from template
+ clone_permissions: true # VM Template permissions are cloned
+
+ ## Option 2: Virtual Machine deployment through PXE and Kickstart
+ # Requires definition: sap_vm_provision_ovirt_vm_kickstart_definition
+ cd_iso: "{{ sap_vm_provision_ovirt_vm_kickstart_definition.os_image_iso | default('') }}"
+ initrd_path: "{{ sap_vm_provision_ovirt_vm_kickstart_definition.initrd_path | default('') }}"
+ kernel_path: "{{ sap_vm_provision_ovirt_vm_kickstart_definition.kernel_path | default('') }}"
+ kernel_params: "{{ sap_vm_provision_ovirt_vm_kickstart_definition.kernel_params | default('') }}"
+ kernel_params_persist: false # do not save beyond one run to avoid an installation loop
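+ # Example shape of the Kickstart definition (keys as referenced in this file, values hypothetical):
+ # sap_vm_provision_ovirt_vm_kickstart_definition:
+ #   os_image_iso: "rhel-9-x86_64-dvd.iso"
+ #   initrd_path: "iso://images/pxeboot/initrd.img"
+ #   kernel_path: "iso://images/pxeboot/vmlinuz"
+ #   kernel_params: "inst.ks=http://example.local/ks/host.cfg"
+ #   boot_disk:
+ #     size: 100GiB
+ #     format: cow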
+
+ ## Post-provisioning: Virtual Machine post configuration using cloud-init.
+ # Requires provisioning from a template, which was cloud-enabled.
+ # Cloud-init is run once (persist = false) or at each boot (persist = true).
+ cloud_init_persist: "{{ sap_vm_provision_ovirt_vm_cloud_init_persist | default('false') }}"
+ cloud_init: "{{ sap_vm_provision_ovirt_vm_cloud_init | default({}) }}"
+
+ # Make sure the kernel and cloud-init options are executed once,
+ # but are not repeated after a reboot (volatile = true).
+ # Default is 'false'.
+ # UI option: "Rollback this configuration during reboots"
+ volatile: true
+
+ # Report VM back only after it is done creating the clone image.
+ until: register_provisioned_host_single.vm.status != "image_locked"
+ retries: 120
+ delay: 5
+
+### End of boot disk and VM creation block
+
+
+- name: Start the VM, if not running
+ ovirt.ovirt.ovirt_vm:
+ auth: "{{ ovirt_session.ansible_facts.ovirt_auth }}"
+ name: "{{ inventory_hostname }}"
+ state: running
+
+- name: Remove installation ISO from the config
+ ovirt.ovirt.ovirt_vm:
+ auth: "{{ ovirt_session.ansible_facts.ovirt_auth }}"
+ name: "{{ inventory_hostname }}"
+ cd_iso: ""
+ when: sap_vm_provision_ovirt_vm_kickstart_definition is defined
+
+- name: Check VM status
+ register: register_provisioned_host_single_info
+ ovirt.ovirt.ovirt_vm_info:
+ auth: "{{ ovirt_session.ansible_facts.ovirt_auth }}"
+ pattern: name={{ inventory_hostname }} and cluster={{ sap_vm_provision_ovirt_hypervisor_cluster_name }}
+ all_content: true
+ fetch_nested: true
+ nested_attributes:
+ - ips
+ - name
+ - applications
+ # Allow for 15 minutes until the VM reports devices, which include the IP and
+ # are required in following tasks.
+ until: register_provisioned_host_single_info.ovirt_vms[0].reported_devices | length > 0
+ retries: 180
+ delay: 5
+
+
+- name: Create fact for delegate host IP
+ ansible.builtin.set_fact:
+ provisioned_private_ip: "{{ register_provisioned_host_single_info.ovirt_vms[0].reported_devices[0].ips[0].address }}"
+
+
+- name: Collect only facts about hardware
+ register: host_disks_info
+ ansible.builtin.setup:
+ gather_subset:
+ - hardware
+ remote_user: root
+ become: true
+ become_user: root
+ delegate_to: "{{ provisioned_private_ip }}"
+ delegate_facts: false
+ vars:
+ ansible_ssh_private_key_file: "{{ sap_vm_provision_ssh_host_private_key_file_path }}"
+
+#- name: Output disks
+# ansible.builtin.debug:
+# var: hostvars[inventory_hostname].ansible_devices.keys() | list
+
+#- name: Debug Ansible Facts devices used list
+# ansible.builtin.debug:
+# msg: "{{ host_disks_info.ansible_facts.ansible_device_links.ids.keys() | list }}"
+
+
+- name: Set fact for available storage volume device names
+ ansible.builtin.set_fact:
+ available_volumes: |-
+ {% set letters = 'bcdefghijklmnopqrstuvwxyz' %}
+ {% set ansible_facts_devices_used_list = host_disks_info.ansible_facts.ansible_device_links.ids.keys() | list %}
+ {% set volumes = [] %}
+ {%- for letter in letters -%}
+ {% if 'sd' + letter not in ansible_facts_devices_used_list -%}
+ {% set dev = volumes.append('/dev/sd' + letter) %}
+ {%- endif %}
+ {%- endfor %}
+ {{ volumes | list | unique }}
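+# Example: with used device names ['sda', 'sda1', 'sdb'] the available device names become ['/dev/sdc', '/dev/sdd', ... '/dev/sdz']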
+
+# - name: Debug available_volumes
+# ansible.builtin.debug:
+# msg: "{{ available_volumes }}"
+
+
+# Combination of only the filesystem volume information from the lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')
+# for volume device assignment.
+# This task assigns device names for each volume to be created.
+- name: Set fact for target device map
+ ansible.builtin.set_fact:
+ filesystem_volume_map: |
+ {% set volume_map = [] -%}
+ {% set av_vol = available_volumes -%}
+ {% for storage_item in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].storage_definition -%}
+ {% for idx in range(0, storage_item.disk_count | default(1)) -%}
+ {% if (storage_item.filesystem_type is defined) -%}
+ {% if ('swap' in storage_item.filesystem_type and storage_item.swap_path is not defined)
+ or ('swap' not in storage_item.filesystem_type and storage_item.nfs_path is not defined) -%}
+ {% set vol = volume_map.extend([
+ {
+ 'definition_key': storage_item.name,
+ 'device': av_vol[0],
+ 'fstype': storage_item.filesystem_type | default('xfs'),
+ 'name': storage_item.name + idx|string,
+ 'size': storage_item.disk_size | default(0),
+ 'type': storage_item.disk_type | default('')
+ }
+ ]) %}
+ {%- set _ = av_vol.pop(0) -%}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- endfor %}
+ {{ volume_map }}
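+# Example (hypothetical values, assuming '/dev/sdb' is the first free device): a storage_definition item such as
+# {name: 'hana_data', disk_count: 1, disk_size: 384, filesystem_type: 'xfs'}
+# yields the entry {'definition_key': 'hana_data', 'device': '/dev/sdb', 'fstype': 'xfs', 'name': 'hana_data0', 'size': 384, 'type': ''}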
+
+#- name: Debug filesystem_volume_map
+# ansible.builtin.debug:
+# msg: "{{ filesystem_volume_map }}"
+
+
+# The volume creation task requires the above task to define the parameter
+# which contains the calculated unique device names.
+- name: Provision Virtual Disk volumes for OVirt VM filesystems
+ ovirt.ovirt.ovirt_disk:
+ auth: "{{ ovirt_session.ansible_facts.ovirt_auth }}"
+ name: "{{ inventory_hostname }}-vol_{{ vol_item.name }}"
+ vm_name: "{{ inventory_hostname }}"
+ size: "{{ vol_item.size }}GiB"
+ format: cow
+ interface: virtio_scsi
+ storage_domain: "{{ sap_vm_provision_ovirt_hypervisor_cluster_storage_domain_name }}" # Hypervisor Cluster's attached storage domain
+ wait: true
+ bootable: false
+ loop: "{{ filesystem_volume_map }}"
+ loop_control:
+ loop_var: vol_item
+ index_var: vol_item_index
+ label: "{{ vol_item.definition_key }}: {{ vol_item.name }} (size: {{ vol_item.size }})"
+ when:
+ - vol_item.fstype is defined
+ - vol_item.size > 0
+ register: volume_provisioning
+
+
+- name: Append loop value to register
+ ansible.builtin.set_fact:
+ register_provisioned_host_single: "{{ register_provisioned_host_single_info.ovirt_vms[0] | combine( { 'host_node' : inventory_hostname } , { 'sap_host_type' : lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_host_type } , { 'sap_system_type' : (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_system_type | default('')) } ) }}"
+
+- name: Append output to merged register
+ ansible.builtin.set_fact:
+ register_provisioned_host_all: "{{ register_provisioned_host_all + [register_provisioned_host_single] }}"
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/ovirt_vm/post_deployment_execute.yml b/roles/sap_vm_provision/tasks/platform_ansible/ovirt_vm/post_deployment_execute.yml
new file mode 100644
index 0000000..19c7341
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/ovirt_vm/post_deployment_execute.yml
@@ -0,0 +1,5 @@
+---
+
+- name: Post Deployment notification
+ ansible.builtin.debug:
+ msg: "There are no Post Deployment tasks for SAP on this Infrastructure Platform"
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/vmware_vm/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible/vmware_vm/execute_main.yml
new file mode 100644
index 0000000..1d95370
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/vmware_vm/execute_main.yml
@@ -0,0 +1,107 @@
+---
+
+- name: Ansible Task block for looped provisioning of VMware VMs
+ block:
+
+ - name: Set fact to hold loop variables from include_tasks
+ ansible.builtin.set_fact:
+ register_provisioned_host_all: []
+
+ # Use vmware.vmware_rest Ansible Collection for VMware vCenter REST API, for VMware vSphere 7.0.2+
+ # Does not use community.vmware Ansible Collection for legacy pyvmomi Python Package for VMware vCenter SOAP API
+
+ # Use of environment avoids the need for variables in each Ansible Module call
+ # Hypervisor Control Plane credentials
+ # vcenter_hostname: "{{ sap_vm_provision_vmware_vcenter_hostname | default(lookup('env', 'VMWARE_HOST')) | default(omit) }}"
+ # vcenter_validate_certs: "{{ (sap_vm_provision_vmware_vcenter_validate_certs_bool | default(lookup('env', 'VMWARE_VALIDATE_CERTS'))) | bool | default(false) }}"
+ # vcenter_username: "{{ sap_vm_provision_vmware_vcenter_user | default(lookup('env', 'VMWARE_USER')) | default(omit) }}"
+ # vcenter_password: "{{ sap_vm_provision_vmware_vcenter_password | default(lookup('env', 'VMWARE_PASSWORD')) | default(omit) }}"
+
+ - name: Provision hosts to VMware vSphere
+ register: register_provisioned_hosts
+ ansible.builtin.include_tasks:
+ file: "{{ 'platform_' + sap_vm_provision_iac_type }}/{{ sap_vm_provision_iac_platform }}/execute_provision.yml"
+ apply:
+ environment:
+ VMWARE_HOST: "{{ sap_vm_provision_vmware_vcenter_hostname | default(lookup('env', 'VMWARE_HOST')) | default(omit) }}"
+ VMWARE_VALIDATE_CERTS: "{{ (sap_vm_provision_vmware_vcenter_validate_certs_bool | default(lookup('env', 'VMWARE_VALIDATE_CERTS'))) | bool | default(false) }}"
+ VMWARE_USER: "{{ sap_vm_provision_vmware_vcenter_user | default(lookup('env', 'VMWARE_USER')) | default(omit) }}"
+ VMWARE_PASSWORD: "{{ sap_vm_provision_vmware_vcenter_password | default(lookup('env', 'VMWARE_PASSWORD')) | default(omit) }}"
+
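+ # Illustrative only: with hypothetical values sap_system_type 'sap_s4hana' and
+ # sap_host_type 'hana_primary', the host joins inventory group 'sap_s4hana_hana_primary';
+ # an empty sap_system_type yields the plain 'hana_primary' group.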
+ - name: Add hosts provisioned to the Ansible Inventory
+ register: register_add_hosts
+ ansible.builtin.add_host:
+ name: "{{ add_item[0].host_node }}"
+ groups: "{{ add_item[0].sap_system_type + '_' if (add_item[0].sap_system_type != '') }}{{ add_item[0].sap_host_type }}"
+ ansible_host: "{{ add_item[0].vmware_vm_network_info.ansible_facts.ansible_default_ipv4.address }}"
+ ansible_user: "root"
+ ansible_ssh_private_key_file: "{{ sap_vm_provision_ssh_host_private_key_file_path }}"
+ ansible_ssh_common_args: -o ConnectTimeout=180 -o ControlMaster=auto -o ControlPersist=3600s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no -o ProxyCommand='ssh -W %h:%p {{ sap_vm_provision_bastion_user }}@{{ sap_vm_provision_bastion_public_ip }} -p {{ sap_vm_provision_bastion_ssh_port }} -i {{ sap_vm_provision_ssh_bastion_private_key_file_path }} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
+ loop: "{{ ansible_play_hosts | map('extract', hostvars, 'register_provisioned_host_all') }}"
+ loop_control:
+ label: "{{ add_item[0].host_node }}"
+ loop_var: add_item
+
+
+# Cannot override any variables from extravars input, see https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#understanding-variable-precedence
+# Ensure no default value exists for any prompted variable before execution of Ansible Playbook
+
+ - name: Set fact to hold all inventory hosts in all groups
+ ansible.builtin.set_fact:
+ groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] ] | flatten | select() }}"
+
+ - name: Set Ansible Vars
+ register: register_set_ansible_vars
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars.yml
+
+ # - ansible.builtin.debug:
+ # var: register_add_hosts.results
+
+- name: Ansible Task block to execute on target inventory hosts
+ delegate_to: "{{ inventory_hostname }}"
+ block:
+
+ # Required to collect the remote host's facts for further processing
+ # in the following steps
+ - name: Gather host facts
+ ansible.builtin.setup:
+
+ # Must be set to short hostname,
+ # so that command 'hostname' and 'hostname -s' return the short hostname only;
+ # otherwise may cause error with SAP SWPM using name.domain.com.domain.com
+ - name: Change system hostname (must be set to short name and not FQDN, as required by SAP)
+ ansible.builtin.hostname:
+ name: "{{ inventory_hostname_short }}"
+
+ - name: Set /etc/hosts
+ register: register_etc_hosts_file
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts.yml
+
+ - name: Set /etc/hosts for HA
+ register: register_etc_hosts_file_ha
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_ha.yml
+ when:
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+
+ - name: Set /etc/hosts for Scale-Out
+ register: register_etc_hosts_file_scaleout
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_scaleout.yml
+ when:
+ - (groups["hana_primary"] is defined and (groups["hana_primary"] | length>0)) and (sap_hana_scaleout_active_coordinator is defined or sap_hana_scaleout_active_worker is defined or sap_hana_scaleout_standby is defined)
+
+ - name: Set vars for sap_storage_setup Ansible Role
+ register: register_ansible_vars_storage
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars_storage.yml
+
+ - name: Register Package Repositories
+ ansible.builtin.include_tasks:
+ file: common/register_os.yml
+
+ - name: Register Web Forward Proxy
+ ansible.builtin.include_tasks:
+ file: common/register_proxy.yml
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/vmware_vm/execute_provision.yml b/roles/sap_vm_provision/tasks/platform_ansible/vmware_vm/execute_provision.yml
new file mode 100644
index 0000000..ae2c144
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/vmware_vm/execute_provision.yml
@@ -0,0 +1,378 @@
+---
+# The tasks in this file are executed in a loop over the defined hosts
+
+# Use vmware.vmware_rest Ansible Collection for VMware vCenter REST API, for VMware vSphere 7.0.2+
+# Does not use community.vmware Ansible Collection for legacy pyvmomi Python Package for VMware vCenter SOAP API
+
+
+# When SAP HANA Scale-Out is used and the host name is not in the original specifications,
+# strip the trailing node number from the host name to find the host specification key
+- name: Set fact when performing SAP HANA Scale-Out
+ ansible.builtin.set_fact:
+ scaleout_origin_host_spec: "{{ inventory_hostname | regex_replace('^(.+?)\\d*$', '\\1') }}"
+ when:
+ - sap_hana_scaleout_active_coordinator is defined
+ - not inventory_hostname in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan].keys()
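+ # Example: inventory_hostname 'hana-scaleout2' resolves to host specification key 'hana-scaleout'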
+
+- name: Identify VM Folder
+ register: register_vmware_vm_folder
+ vmware.vmware_rest.vcenter_folder_info:
+ names: "{{ sap_vm_provision_vmware_vm_folder_name }}"
+ type: VIRTUAL_MACHINE
+
+- name: Identify Datacenter Cluster
+ register: register_vmware_vm_cluster
+ vmware.vmware_rest.vcenter_cluster_info:
+ names: "{{ sap_vm_provision_vmware_vm_cluster_name }}"
+
+- name: Identify Host in Datacenter Cluster
+ register: register_vmware_vm_cluster_host
+ vmware.vmware_rest.vcenter_host_info:
+ names: "{{ sap_vm_provision_vmware_vm_cluster_host_name }}"
+
+- name: Identify Datastore
+ register: register_vmware_vm_cluster_datastore
+ vmware.vmware_rest.vcenter_datastore_info:
+ names: "{{ sap_vm_provision_vmware_vm_cluster_datastore_name }}"
+
+- name: Identify Content Library (to store VM Template)
+ register: register_vmware_vm_content_library
+ vmware.vmware_rest.content_locallibrary:
+ name: "{{ sap_vm_provision_vmware_vm_content_library_name }}"
+
+- name: List all items in Content Library
+ register: register_vmware_vm_content_library_items
+ vmware.vmware_rest.content_library_item_info:
+ library_id: "{{ register_vmware_vm_content_library.id }}"
+
+- name: Identify VMware Template ID
+ ansible.builtin.set_fact:
+ vmware_vm_template_id: "{{ (register_vmware_vm_content_library_items.value | selectattr('type', '==', 'vm-template') | selectattr('name', '==', sap_vm_provision_vmware_vm_template_name) | first).id }}"
+
+- name: Check if VM exists
+ register: register_check_vm_exists
+ vmware.vmware_rest.vcenter_vm_info:
+ names: "{{ inventory_hostname }}"
+
+
+- name: Set VM ID
+ when: register_check_vm_exists.value | length > 0
+ ansible.builtin.set_fact:
+ register_vmware_vm_cluster_host_id: "{{ register_check_vm_exists.value[0].vm }}" # VM ID
+
+- name: Check VM status
+ register: register_provisioned_host_single_info
+ when: register_check_vm_exists.value | length > 0
+ vmware.vmware_rest.vcenter_vm:
+ vm: "{{ register_vmware_vm_cluster_host_id }}" # VM ID
+
+
+# VM creation block:
+# This block is run when the VM does not exist yet.
+- name: Block that provisions the VM
+ when: register_check_vm_exists.value | length == 0
+ block:
+
+ # Deploy a Virtual Machine from a VM Template in a Content Library
+ # Doc: https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vm_admin.doc/GUID-6EA309BC-9113-449C-B668-ACBB363485C3.html
+ - name: Provision VMware Virtual Machine based upon the VM Template
+ register: register_provisioned_host_single
+ vmware.vmware_rest.vcenter_vmtemplate_libraryitems:
+
+ ## Virtual Machine target Hypervisor definition
+ placement:
+ folder: "{{ (register_vmware_vm_folder.value | first).folder }}"
+ # resource_pool: ""
+ cluster: "{{ (register_vmware_vm_cluster.value | first).cluster }}"
+ host: "{{ (register_vmware_vm_cluster_host.value | first).host }}"
+
+ ## Virtual Machine clone from VM Template definition
+ template_library_item: '{{ vmware_vm_template_id }}' # ID of the Content Library Item with the source VM Template (not OVF) to be cloned and deployed
+ state: deploy # Deploy the VM Template defined in template_library_item
+ powered_on: false # Do not power on after the VM Template is cloned and deployed; the guest customization below requires a powered-off VM
+ session_timeout: 600 # 10 minutes
+
+ ## Virtual Machine definition
+ name: "{{ inventory_hostname }}"
+ description: "{{ inventory_hostname }} created by Ansible Playbook for SAP"
+
+ ## Virtual Machine main resources definition
+ ## May cause conflict with powered_on parameter
+ hardware_customization:
+ cpu_update:
+ num_cpus: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].vmware_vm_cpu_threads }}"
+ num_cores_per_socket: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].vmware_vm_cpu_smt }}"
+ memory_update:
+ memory: "{{ (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].vmware_vm_memory_gib | int) * 1024 }}" # MiB
+ # nics:
+
+ ## Virtual Machine Storage configuration
+ ## Boot Disk will be loaded to this datastore
+ disk_storage:
+ datastore: "{{ (register_vmware_vm_cluster_datastore.value | first).datastore }}"
+ # storage_policy:
+
+ # # Report VM back only after it is done creating the clone image.
+ # until: register_provisioned_host_single.vm.status != "image_locked"
+ # retries: 120
+ # delay: 5
+
+### End of boot disk and VM creation block
+
+
+- name: Set VM ID
+ when: register_check_vm_exists.value | length == 0
+ ansible.builtin.set_fact:
+ register_vmware_vm_cluster_host_id: "{{ register_provisioned_host_single.value }}" # Returned from VM provision
+
+
+- name: Check VM status
+ register: register_provisioned_host_single_info
+ vmware.vmware_rest.vcenter_vm:
+ vm: "{{ register_vmware_vm_cluster_host_id }}"
+
+
+# Example https://cloudinit.readthedocs.io/en/23.4.1/reference/datasources/vmware.html#walkthrough-of-guestinfo-keys-transport
+# Docs https://developer.vmware.com/docs/18555/GUID-75E27FA9-2E40-4CBF-BF3D-22DCFC8F11F7.html
+# >> The instance-id key is required. All other keys are optional.
+- name: Set cloud-init variables for customization specification
+ when: register_provisioned_host_single_info.value.power_state is defined and register_provisioned_host_single_info.value.power_state != "POWERED_ON"
+ ansible.builtin.set_fact:
+ metadata_yaml:
+ instance-id: "{{ inventory_hostname }}"
+ hostname: "{{ inventory_hostname }}"
+ local-hostname: "{{ inventory_hostname }}"
+ network:
+ version: 2
+ ethernets:
+ nics:
+ match:
+ name: e*
+ dhcp4: true
+ dhcp6: false
+ public_ssh_keys:
+ - "{{ lookup('ansible.builtin.file', sap_vm_provision_ssh_host_public_key_file_path) }}"
+
+ userdata_yaml_text: |
+ #cloud-config
+
+ hostname: {{ inventory_hostname }}
+ fqdn: {{ inventory_hostname }}.{{ sap_vm_provision_dns_root_domain }}
+
+ # timezone: "Etc/UTC"
+
+ # Ensure root login is enabled
+ disable_root: false
+
+ # Ensure SSH password authentication is disabled for all users
+ ssh_pwauth: false
+
+ # Ensure all existing SSH Keys are removed from host
+ ssh_deletekeys: true
+
+ # By default, (most) ssh host keys are printed to the console
+ # Setting emit_keys_to_console to false suppresses this output
+ ssh:
+ emit_keys_to_console: false
+
+ # By default, the fingerprints of the authorized keys for the users
+ # cloud-init adds are printed to the console. Setting
+ # no_ssh_fingerprints to true suppresses this output
+ no_ssh_fingerprints: false
+
+ # For first user in the cloud-init configuration, set the SSH Public Key
+ ssh_authorized_keys:
+ - {{ lookup('ansible.builtin.file', sap_vm_provision_ssh_host_public_key_file_path) }}
+
+ # Add entry to /root/.ssh/authorized_keys
+ # Do not lock the password after access to the host is established; password authentication for SSH remains disabled
+ # Ensure SSH password authentication is disabled for root by 'ssh_pwauth' config
+ users:
+ - name: root
+ ssh_authorized_keys:
+ - {{ lookup('ansible.builtin.file', sap_vm_provision_ssh_host_public_key_file_path) }}
+ lock_passwd: false
+
+ # After first boot of the VMware VM Template, disable cloud-init from running again
+ write_files:
+ - path: /etc/cloud/cloud-init.disabled
+ permissions: "0644"
+ content: ""
+
+
+# Doc 1 https://developer.vmware.com/apis/vsphere-automation/latest/vcenter/api/vcenter/vm/vm/guest/customization/put/
+# Doc 2 https://developer.vmware.com/docs/18555/GUID-75E27FA9-2E40-4CBF-BF3D-22DCFC8F11F7.html
+# >> metadata as JSON/YAML, userdata with no compression or base64 encoding
+# Error 400 (com.vmware.vapi.std.errors.not_allowed_in_current_state) is returned if the virtual machine is not in a powered-off state.
+- name: Apply customization specification to the VM in Powered Off state
+ when: register_provisioned_host_single_info.value.power_state is defined and register_provisioned_host_single_info.value.power_state != "POWERED_ON"
+ vmware.vmware_rest.vcenter_vm_guest_customization:
+ vm: '{{ register_vmware_vm_cluster_host_id }}'
+ configuration_spec:
+ cloud_config:
+ type: CLOUDINIT
+ cloudinit:
+ metadata: "{{ metadata_yaml | to_json(ensure_ascii=true) }}"
+ userdata: "{{ userdata_yaml_text | trim }}" # remove last newline character
+ # linux_config:
+ interfaces: []
+ global_DNS_settings: {}
+
+
+- name: Ensure VM is Powered ON
+ register: register_vm_power_info
+ vmware.vmware_rest.vcenter_vm_power:
+ state: start
+ vm: "{{ register_vmware_vm_cluster_host_id }}"
+ # Wait until VM is powered on
+ until: (register_vm_power_info.value.error_type is defined and register_vm_power_info.value.error_type == "ALREADY_IN_DESIRED_STATE")
+ retries: 15
+ delay: 60
+
+- name: Show VM Information
+ register: register_vm_info
+ vmware.vmware_rest.vcenter_vm_info:
+ vm: '{{ register_vmware_vm_cluster_host_id }}'
+ # Wait until VM is powered on
+ until: register_vm_info.value.power_state == "POWERED_ON"
+ retries: 45
+ delay: 20
+
+- name: Get guest networking information (wait until DHCP assigns IP Address for host)
+ register: register_vm_nic_info
+ vmware.vmware_rest.vcenter_vm_guest_networking_interfaces_info:
+ vm: '{{ register_vmware_vm_cluster_host_id }}'
+ # Wait until VM Tools is running
+ until: (register_vm_nic_info.value.error_type | default("")) != "SERVICE_UNAVAILABLE" and (register_vm_nic_info.value[0].ip.ip_addresses | length) > 0
+ retries: 45
+ delay: 20
+
+
+# Use IP Address from the preferred vNIC
+- name: Create fact for delegate host IP
+ ansible.builtin.set_fact:
+ provisioned_private_ip: "{{ ((register_vm_nic_info.value | map(attribute='ip.ip_addresses')) | flatten | selectattr('state', '==', 'PREFERRED') | first).ip_address }}"
+
+
+- name: Collect only facts about hardware
+ register: host_disks_info
+ ansible.builtin.setup:
+ gather_subset:
+ - hardware
+ remote_user: root
+ become: true
+ become_user: root
+ delegate_to: "{{ provisioned_private_ip }}"
+ delegate_facts: false
+ vars:
+ ansible_ssh_private_key_file: "{{ sap_vm_provision_ssh_host_private_key_file_path }}"
+ ansible_ssh_common_args: -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no
+
+- name: Collect only facts about network
+ register: vmware_vm_network_info
+ ansible.builtin.setup:
+ gather_subset:
+ - default_ipv4
+ - network
+ remote_user: root
+ become: true
+ become_user: root
+ delegate_to: "{{ provisioned_private_ip }}"
+ delegate_facts: false
+ vars:
+ ansible_ssh_private_key_file: "{{ sap_vm_provision_ssh_host_private_key_file_path }}"
+ ansible_ssh_common_args: -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no
+
+#- name: Output disks
+# ansible.builtin.debug:
+# var: hostvars[inventory_hostname].ansible_devices.keys() | list
+
+#- name: Debug Ansible Facts devices used list
+# ansible.builtin.debug:
+# msg: "{{ host_disks_info.ansible_facts.ansible_device_links.ids.keys() | list }}"
+
+
+- name: Set fact for available storage volume device names
+ ansible.builtin.set_fact:
+ available_volumes: |-
+ {% set letters = 'bcdefghijklmnopqrstuvwxyz' %}
+ {% set ansible_facts_devices_used_list = host_disks_info.ansible_facts.ansible_device_links.ids.keys() | list %}
+ {% set volumes = [] %}
+ {# Propose /dev/sdX names whose short device name is not already present on the host #}
+ {%- for letter in letters -%}
+ {% if 'sd' + letter not in ansible_facts_devices_used_list -%}
+ {% set _ = volumes.append('/dev/sd' + letter) %}
+ {%- endif %}
+ {%- endfor %}
+ {{ volumes | list | unique }}
+
+# - name: Debug available_volumes
+# ansible.builtin.debug:
+# msg: "{{ available_volumes }}"
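+# Illustrative only: on a VM whose only disk is 'sda', the result would be
+# ['/dev/sdb', '/dev/sdc', ... , '/dev/sdz']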
+
+
+# Filters the lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')
+# down to only the filesystem volume information needed for volume device assignment.
+# This task assigns a device name to each volume to be created.
+- name: Set fact for target device map
+ ansible.builtin.set_fact:
+ filesystem_volume_map: |
+ {% set volume_map = [] -%}
+ {% set av_vol = available_volumes -%}
+ {% for storage_item in lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].storage_definition -%}
+ {% for idx in range(0, storage_item.disk_count | default(1)) -%}
+ {% if (storage_item.filesystem_type is defined) -%}
+ {% if ('swap' in storage_item.filesystem_type and storage_item.swap_path is not defined)
+ or ('swap' not in storage_item.filesystem_type and storage_item.nfs_path is not defined) -%}
+ {%- set _ = volume_map.extend([
+ {
+ 'definition_key': storage_item.name,
+ 'device': av_vol[0],
+ 'fstype': storage_item.filesystem_type | default('xfs'),
+ 'name': storage_item.name + idx|string,
+ 'size': storage_item.disk_size | default(0),
+ 'type': storage_item.disk_type | default('')
+ }
+ ]) %}
+ {%- set _ = av_vol.pop(0) -%}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- endfor %}
+ {{ volume_map }}
+
+#- name: Debug filesystem_volume_map
+# ansible.builtin.debug:
+# msg: "{{ filesystem_volume_map }}"
+
+
+# The volume creation task requires the above task to define the parameter
+# which contains the calculated unique device names.
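+# A VMDK that 'already exists' is tolerated via failed_when below, keeping re-runs idempotent.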
+- name: Provision Virtual Disk volumes and attach to VM
+ vmware.vmware_rest.vcenter_vm_hardware_disk:
+ vm: "{{ register_vmware_vm_cluster_host_id }}"
+ type: "{{ vol_item.type | upper }}"
+ state: present
+ label: "{{ vol_item.name }}"
+ new_vmdk:
+ name: "{{ inventory_hostname }}_{{ vol_item.name }}" # VMDK filename
+ capacity: "{{ vol_item.size | human_to_bytes(default_unit='G') }}"
+ loop: "{{ filesystem_volume_map }}"
+ loop_control:
+ loop_var: vol_item
+ index_var: vol_item_index
+ label: "{{ vol_item.definition_key }}: {{ vol_item.name }} (size: {{ vol_item.size }})"
+ when:
+ - vol_item.fstype is defined
+ - vol_item.size > 0
+ register: volume_provisioning
+ failed_when: not volume_provisioning.value is defined and not 'already exists' in volume_provisioning.msg
+
+
+- name: Append loop value to register
+ ansible.builtin.set_fact:
+ register_provisioned_host_single: "{{ register_provisioned_host_single_info | combine( { 'host_node' : inventory_hostname } , { 'vmware_vm_network_info' : vmware_vm_network_info } , { 'sap_host_type' : lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_host_type } , { 'sap_system_type' : (lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary')[sap_vm_provision_host_specification_plan][scaleout_origin_host_spec | default(inventory_hostname)].sap_system_type | default('')) } ) }}"
+
+- name: Append output to merged register
+ ansible.builtin.set_fact:
+ register_provisioned_host_all: "{{ register_provisioned_host_all + [register_provisioned_host_single] }}"
diff --git a/roles/sap_vm_provision/tasks/platform_ansible/vmware_vm/post_deployment_execute.yml b/roles/sap_vm_provision/tasks/platform_ansible/vmware_vm/post_deployment_execute.yml
new file mode 100644
index 0000000..19c7341
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible/vmware_vm/post_deployment_execute.yml
@@ -0,0 +1,5 @@
+---
+
+- name: Post Deployment notification
+ ansible.builtin.debug:
+ msg: "There are no Post Deployment tasks for SAP on this Infrastructure Platform"
diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/aws_ec2_vs/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/aws_ec2_vs/execute_main.yml
new file mode 100644
index 0000000..11f94e9
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/aws_ec2_vs/execute_main.yml
@@ -0,0 +1,184 @@
+---
+
+- name: Ansible Task block for Terraform apply of multiple Terraform Modules
+ block:
+
+ # Do not use ansible.builtin.copy as this will cause error 'not writable' on localhost (even if user has permissions)
+ - name: Copy Terraform Template files to temporary directory in current Ansible Playbook directory
+ ansible.builtin.shell: |
+ mkdir -p {{ sap_vm_provision_terraform_work_dir_path }}
+ cp -r {{ role_path }}/tasks/platform_ansible_to_terraform/{{ sap_vm_provision_iac_platform }}/tf_template/* {{ sap_vm_provision_terraform_work_dir_path }}
+
+ - name: Terraform Template for SAP - AWS
+ register: terraform_template1_result
+ environment:
+ AWS_ACCESS_KEY_ID: "{{ sap_vm_provision_aws_access_key }}"
+ AWS_SECRET_ACCESS_KEY: "{{ sap_vm_provision_aws_secret_access_key }}"
+ AWS_REGION: "{{ sap_vm_provision_aws_region }}"
+ cloud.terraform.terraform:
+ project_path: "{{ sap_vm_provision_terraform_work_dir_path }}"
+ state: "{{ sap_vm_provision_terraform_state }}"
+ force_init: true
+ complex_vars: true
+ variables:
+ aws_access_key: "{{ sap_vm_provision_aws_access_key }}"
+ aws_secret_key: "{{ sap_vm_provision_aws_secret_access_key }}"
+ aws_vpc_availability_zone: "{{ sap_vm_provision_aws_vpc_availability_zone }}"
+ aws_vpc_subnet_id: "{{ sap_vm_provision_aws_vpc_subnet_id }}"
+ sap_vm_provision_resource_prefix: "{{ sap_vm_provision_resource_prefix }}"
+ sap_vm_provision_dns_root_domain: "{{ sap_vm_provision_dns_root_domain }}"
+ sap_vm_provision_bastion_os_image: "{{ sap_vm_provision_bastion_os_image }}"
+ sap_vm_provision_bastion_user: "{{ sap_vm_provision_bastion_user }}"
+ sap_vm_provision_bastion_ssh_port: "{{ sap_vm_provision_bastion_ssh_port }}"
+ map_os_image_regex: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_os_image_dictionary') }}"
+ map_host_specifications: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary') }}"
+ sap_vm_provision_host_specification_plan: "{{ sap_vm_provision_host_specification_plan }}"
+ sap_vm_provision_aws_ec2_vs_host_os_image: "{{ sap_vm_provision_aws_ec2_vs_host_os_image }}"
+ sap_software_download_directory: "{{ sap_software_download_directory }}"
+ sap_hana_install_instance_nr: "{{ sap_hana_install_instance_nr | default('') }}"
+ sap_nwas_abap_ascs_instance_no: "{{ sap_swpm_ascs_instance_nr | default('') }}"
+ sap_nwas_abap_pas_instance_no: "{{ sap_swpm_pas_instance_nr | default('') }}"
+
+ - name: Terraform Template output
+ ansible.builtin.debug:
+ var: terraform_template1_result
+
+
+ # - name: Execute Ansible Role cloud.terraform.inventory_from_outputs
+ # register: terraform_output_to_ansible_inventory
+ # ansible.builtin.include_role:
+ # name: cloud.terraform.inventory_from_outputs
+ # vars:
+ # project_path: "{{ sap_vm_provision_terraform_work_dir_path }}"
+ # mapping_variables:
+ # host_list: sap_host_list
+ # name: output_host_name
+ # ip: output_host_ip
+ # user: output_host_os_user
+ # group: output_ansible_inventory_group
+
+
+ - name: Read outputs from project path
+ when: sap_vm_provision_terraform_state == "present"
+ cloud.terraform.terraform_output:
+ project_path: "{{ sap_vm_provision_terraform_work_dir_path }}"
+ register: terraform_output_project_path
+
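+ # Illustrative only — one element of the 'sap_host_list' Terraform output consumed
+ # by the loop below (IP address hypothetical): output_host_name: 'hana-p',
+ # output_host_ip: '10.0.1.10', output_host_os_user: 'root',
+ # output_ansible_inventory_group: 'hana_primary'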
+ - name: Add hosts from terraform_output to the group defined in terraform_output
+ when: sap_vm_provision_terraform_state == "present"
+ register: terraform_add_hosts
+ ansible.builtin.add_host:
+ name: "{{ item['output_host_name'] }}"
+ groups: "{{ item['output_ansible_inventory_group'] }}"
+ ansible_host: "{{ item['output_host_ip'] }}"
+ ansible_user: "{{ item['output_host_os_user'] }}"
+ ansible_ssh_private_key_file: "{{ sap_vm_provision_terraform_work_dir_path }}/ssh/hosts_rsa"
+ ansible_ssh_common_args: -o ConnectTimeout=180 -o ControlMaster=auto -o ControlPersist=3600s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no -o ProxyCommand='ssh -W %h:%p {{ terraform_output.outputs['bastion_os_user'].value }}@{{ terraform_output.outputs['sap_vm_provision_bastion_public_ip'].value }} -p {{ terraform_output.outputs['bastion_port'].value }} -i {{ sap_vm_provision_terraform_work_dir_path }}/ssh/bastion_rsa -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
+ loop: "{{ terraform_output.outputs['sap_host_list'].value }}"
+ vars:
+ # even skipped tasks register variables, so we need to choose one explicitly
+ terraform_output: "{{ (terraform_output_project_path is defined and terraform_output_project_path is success) |
+ ternary(terraform_output_project_path, terraform_output_state_file) }}"
+
+
+# Cannot override any variables from extravars input, see https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#understanding-variable-precedence
+# Ensure no default value exists for any prompted variable before execution of Ansible Playbook
+
+ - name: Set fact to hold all inventory hosts in all groups
+ ansible.builtin.set_fact:
+ groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] , [ groups['anydb_primary'] | default([]) ] , [ groups['anydb_secondary'] | default([]) ] ] | flatten | select() }}"
+
+ - name: Set facts for all hosts - use facts from localhost for NFS
+ when: sap_vm_provision_terraform_state == "present"
+ ansible.builtin.set_fact:
+ sap_vm_provision_nfs_mount_point: "{{ terraform_output.outputs['sap_vm_provision_nfs_mount_point'].value | default('') }}"
+ sap_vm_provision_nfs_mount_point_separate_sap_transport_dir: "{{ terraform_output.outputs['sap_vm_provision_nfs_mount_point_separate_sap_transport_dir'].value | default('') }}"
+ sap_vm_provision_nfs_mount_point_type: "{{ terraform_output.outputs['sap_vm_provision_nfs_mount_point_type'].value | default('') }}"
+ sap_vm_provision_nfs_mount_point_opts: "{{ terraform_output.outputs['sap_vm_provision_nfs_mount_point_opts'].value | default('') }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ loop: "{{ groups_merged_list }}"
+ vars:
+ # even skipped tasks register variables, so we need to choose one explicitly
+ terraform_output: "{{ (terraform_output_project_path is defined and terraform_output_project_path is success) |
+ ternary(terraform_output_project_path, terraform_output_state_file) }}"
+
+ - name: Set facts for all hosts - use facts from localhost for host specification dictionary
+ when: sap_vm_provision_terraform_state == "present"
+ ansible.builtin.set_fact:
+ host_specifications_dictionary: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary') }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ loop: "{{ groups_merged_list }}"
+ vars:
+ # even skipped tasks register variables, so we need to choose one explicitly
+ terraform_output: "{{ (terraform_output_project_path is defined and terraform_output_project_path is success) |
+ ternary(terraform_output_project_path, terraform_output_state_file) }}"
+
+ - name: Set Ansible Vars
+ register: register_set_ansible_vars
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars.yml
+ args:
+ apply:
+ delegate_to: "{{ item }}"
+ run_once: true # Otherwise tasks will run twice per host
+ loop: "{{ groups_merged_list }}"
+
+ # Required to collect the remote host's facts for further processing
+ # in the following steps
+ - name: Gather host facts
+ ansible.builtin.setup:
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ loop: "{{ groups_merged_list }}"
+
+ # Must be set to short hostname,
+ # so that command 'hostname' and 'hostname -s' return the short hostname only;
+ # otherwise may cause error with SAP SWPM using name.domain.com.domain.com
+ - name: Change system hostname (must be set to short name and not FQDN, as required by SAP)
+ ansible.builtin.hostname:
+ name: "{{ inventory_hostname_short }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ loop: "{{ groups_merged_list }}"
+
+ - name: Set /etc/hosts
+ register: register_etc_hosts_file
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts.yml
+ args:
+ apply:
+ delegate_to: "{{ item }}"
+ run_once: true # Otherwise tasks will run twice per host
+ loop: "{{ groups_merged_list }}"
+
+ - name: Set /etc/hosts for HA
+ register: register_etc_hosts_file_ha
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_ha.yml
+ when:
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+ args:
+ apply:
+ delegate_to: "{{ item }}"
+ run_once: true # Otherwise tasks will run twice per host
+ loop: "{{ groups_merged_list }}"
+
+ - name: Set /etc/hosts for Scale-Out
+ register: register_etc_hosts_file_scaleout
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_scaleout.yml
+ when:
+ - (groups["hana_primary"] is defined and (groups["hana_primary"] | length>0)) and (sap_hana_scaleout_active_coordinator is defined or sap_hana_scaleout_active_worker is defined or sap_hana_scaleout_standby is defined)
+ args:
+ apply:
+ delegate_to: "{{ item }}"
+ run_once: true # Otherwise tasks will run twice per host
+ loop: "{{ groups_merged_list }}"
+
+ - name: Set vars for sap_storage_setup Ansible Role
+ when: sap_vm_provision_terraform_state == "present"
+ register: register_ansible_vars_storage
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars_storage.yml
diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/aws_ec2_vs/tf_template/tf_template.tf b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/aws_ec2_vs/tf_template/tf_template.tf
new file mode 100644
index 0000000..307bf3e
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/aws_ec2_vs/tf_template/tf_template.tf
@@ -0,0 +1,205 @@
+# Terraform declaration
+
+terraform {
+ required_version = ">= 1.0, <= 1.5.5"
+ required_providers {
+ aws = {
+ #source = "localdomain/provider/aws" // Local, on macOS path to place files would be $HOME/.terraform.d/plugins/localdomain/provider/aws/1.xx.xx/darwin_amd6
+ source = "hashicorp/aws" // Terraform Registry
+ version = ">=3.73.0"
+ }
+ }
+}
+
+
+# Terraform Provider declaration
+
+provider "aws" {
+
+ # Define Provider inputs manually
+ # access_key = "xxxxxxx"
+ # secret_key = "xxxxxxx"
+ # region = "xxxxxxx"
+
+ # Define Provider inputs from given Terraform Variables
+ access_key = var.aws_access_key
+ secret_key = var.aws_secret_key
+ region = local.aws_region
+
+}
+
+
+module "run_account_init_module" {
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//aws_ec2_instance/account_init?ref=main"
+
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+
+ module_var_aws_vpc_subnet_id = var.aws_vpc_subnet_id
+
+ module_var_aws_vpc_subnet_create_boolean = local.aws_vpc_subnet_create_boolean
+
+ module_var_aws_vpc_availability_zone = var.aws_vpc_availability_zone
+
+}
+
+
+module "run_account_bootstrap_module" {
+
+ depends_on = [
+ module.run_account_init_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//aws_ec2_instance/account_bootstrap?ref=main"
+
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+
+ module_var_aws_vpc_subnet_id = local.aws_vpc_subnet_create_boolean ? module.run_account_init_module.output_aws_vpc_subnet_id : var.aws_vpc_subnet_id
+
+ module_var_dns_root_domain_name = var.sap_vm_provision_dns_root_domain
+
+}
+
+
+#module "run_account_iam_module" {
+#
+# depends_on = [
+# module.run_account_bootstrap_module
+# ]
+#}
+
+
+module "run_bastion_inject_module" {
+
+ depends_on = [
+ module.run_account_bootstrap_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//aws_ec2_instance/bastion_inject?ref=main"
+
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+
+ module_var_aws_vpc_subnet_id = local.aws_vpc_subnet_create_boolean ? module.run_account_init_module.output_aws_vpc_subnet_id : var.aws_vpc_subnet_id
+ module_var_aws_vpc_igw_id = module.run_account_init_module.output_aws_vpc_igw_id
+
+ module_var_bastion_user = var.sap_vm_provision_bastion_user
+ module_var_bastion_ssh_port = var.sap_vm_provision_bastion_ssh_port
+ module_var_bastion_os_image = var.map_os_image_regex[var.sap_vm_provision_bastion_os_image]
+ module_var_bastion_ssh_key_name = module.run_account_bootstrap_module.output_bastion_ssh_key_name
+ module_var_bastion_public_ssh_key = module.run_account_bootstrap_module.output_bastion_public_ssh_key
+ module_var_bastion_private_ssh_key = module.run_account_bootstrap_module.output_bastion_private_ssh_key
+
+ module_var_aws_vpc_availability_zone = var.aws_vpc_availability_zone
+
+}
+
+
+module "run_host_network_access_sap_module" {
+
+ depends_on = [
+ module.run_bastion_inject_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//aws_ec2_instance/host_network_access_sap?ref=main"
+
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+
+ module_var_aws_vpc_subnet_id = local.aws_vpc_subnet_create_boolean ? module.run_account_init_module.output_aws_vpc_subnet_id : var.aws_vpc_subnet_id
+ module_var_host_security_group_id = module.run_account_bootstrap_module.output_host_security_group_id
+
+ module_var_sap_nwas_abap_ascs_instance_no = var.sap_nwas_abap_ascs_instance_no
+ module_var_sap_nwas_abap_pas_instance_no = var.sap_nwas_abap_pas_instance_no
+ module_var_sap_hana_instance_no = var.sap_hana_install_instance_nr
+
+}
+
+
+module "run_host_network_access_sap_public_via_proxy_module" {
+
+ depends_on = [
+ module.run_bastion_inject_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//aws_ec2_instance/host_network_access_sap_public_via_proxy?ref=main"
+
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+
+ module_var_aws_vpc_subnet_id = local.aws_vpc_subnet_create_boolean ? module.run_account_init_module.output_aws_vpc_subnet_id : var.aws_vpc_subnet_id
+
+ module_var_bastion_sg_id = module.run_bastion_inject_module.output_bastion_security_group_id
+ module_var_bastion_connection_sg_id = module.run_bastion_inject_module.output_bastion_connection_security_group_id
+
+ module_var_sap_nwas_abap_pas_instance_no = var.sap_nwas_abap_pas_instance_no
+ module_var_sap_hana_instance_no = var.sap_hana_install_instance_nr
+
+}
+
+
+module "run_host_nfs_module" {
+
+ depends_on = [
+ module.run_account_init_module,
+ module.run_account_bootstrap_module,
+ module.run_bastion_inject_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//aws_ec2_instance/host_nfs?ref=main"
+
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+ module_var_aws_vpc_subnet_id = local.aws_vpc_subnet_create_boolean ? module.run_account_init_module.output_aws_vpc_subnet_id : var.aws_vpc_subnet_id
+ module_var_host_sg_id = module.run_account_bootstrap_module.output_host_security_group_id
+
+ module_var_nfs_boolean_sapmnt = sum(flatten(
+ [
+ for host in var.map_host_specifications[var.sap_vm_provision_host_specification_plan] :
+ [ for storage_item in host["storage_definition"] : try(storage_item.nfs_path,"ignore") != "ignore" ? 1 : 0 ]
+ ] )) >0 ? true : false
+
+}
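+# Illustrative only: module_var_nfs_boolean_sapmnt above counts the storage items in
+# the chosen plan that set nfs_path; e.g. a single item with nfs_path = "/sapmnt"
+# (hypothetical) sums to 1, so the expression evaluates to true.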
+
+
+module "run_host_provision_module" {
+
+ depends_on = [
+ module.run_account_init_module,
+ module.run_account_bootstrap_module,
+ module.run_bastion_inject_module,
+ module.run_host_nfs_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//aws_ec2_instance/host_provision?ref=main"
+
+ # Set Terraform Module Variables using Terraform Variables at runtime
+
+ module_var_aws_vpc_subnet_id = local.aws_vpc_subnet_create_boolean ? module.run_account_init_module.output_aws_vpc_subnet_id : var.aws_vpc_subnet_id
+
+ module_var_bastion_user = var.sap_vm_provision_bastion_user
+ module_var_bastion_ssh_port = var.sap_vm_provision_bastion_ssh_port
+ module_var_bastion_private_ssh_key = module.run_account_bootstrap_module.output_bastion_private_ssh_key
+ module_var_bastion_ip = module.run_bastion_inject_module.output_bastion_ip
+ module_var_bastion_connection_sg_id = module.run_bastion_inject_module.output_bastion_connection_security_group_id
+
+ module_var_host_ssh_key_name = module.run_account_bootstrap_module.output_host_ssh_key_name
+ module_var_host_ssh_public_key = module.run_account_bootstrap_module.output_host_public_ssh_key
+ module_var_host_ssh_private_key = module.run_account_bootstrap_module.output_host_private_ssh_key
+ module_var_host_sg_id = module.run_account_bootstrap_module.output_host_security_group_id
+
+ module_var_host_os_image = var.map_os_image_regex[var.sap_vm_provision_aws_ec2_vs_host_os_image]
+
+ module_var_dns_zone_id = module.run_account_bootstrap_module.output_dns_zone_id
+ module_var_dns_root_domain_name = module.run_account_bootstrap_module.output_dns_domain_name
+ module_var_dns_nameserver_list = module.run_account_bootstrap_module.output_dns_nameserver_list
+
+ # Set Terraform Module Variables using for_each loop on a map Terraform Variable with nested objects
+
+ for_each = toset([
+ for key, value in var.map_host_specifications[var.sap_vm_provision_host_specification_plan] : key
+ ])
+
+ module_var_host_name = each.key
+
+ module_var_aws_ec2_instance_type = var.map_host_specifications[var.sap_vm_provision_host_specification_plan][each.key].virtual_machine_profile
+ module_var_disable_ip_anti_spoofing = var.map_host_specifications[var.sap_vm_provision_host_specification_plan][each.key].disable_ip_anti_spoofing
+
+ module_var_storage_definition = [ for storage_item in var.map_host_specifications[var.sap_vm_provision_host_specification_plan][each.key]["storage_definition"] : storage_item if contains(keys(storage_item),"disk_size") && try(storage_item.swap_path,"") == "" ]
+
+}
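+# Illustrative only: with the default 'xsmall_256gb' plan the for_each set above is
+# toset(["hana-p"]), so one host_provision module instance is created per host name.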
diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/aws_ec2_vs/tf_template/tf_template_input_vars.tf b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/aws_ec2_vs/tf_template/tf_template_input_vars.tf
new file mode 100644
index 0000000..adb8562
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/aws_ec2_vs/tf_template/tf_template_input_vars.tf
@@ -0,0 +1,234 @@
+
+locals {
+
+ aws_vpc_subnet_create_boolean = var.aws_vpc_subnet_id == "new" ? true : false
+
+ # Directories start with "C:..." on Windows; All other OSs use "/" for root.
+ detect_windows = substr(pathexpand("~"), 0, 1) == "/" ? false : true
+ detect_shell = substr(pathexpand("~"), 0, 1) == "/" ? true : false
+
+ # Used for displaying Shell ssh connection output
+ # /proc/version contains the WSL substring; if detected, then running under Windows Subsystem for Linux
+ not_wsl = fileexists("/proc/version") ? length(regexall("WSL", file("/proc/version"))) > 0 ? false : true : true
+
+ # Used for displaying Windows PowerShell ssh connection output
+ # /proc/version contains the WSL substring; if detected, then running under Windows Subsystem for Linux
+ is_wsl = fileexists("/proc/version") ? length(regexall("WSL", file("/proc/version"))) > 0 ? true : false : false
+
+ aws_region = replace(var.aws_vpc_availability_zone,"/[a-c]$/","")
+
+}
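+# Illustrative only: aws_vpc_availability_zone = "us-east-1a" yields local.aws_region = "us-east-1"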
+
+
+variable "aws_access_key" {
+ description = "AWS Access Key"
+}
+
+variable "aws_secret_key" {
+ description = "AWS Secret Key"
+}
+
+variable "sap_vm_provision_resource_prefix" {
+ description = "Prefix to resource names"
+}
+
+variable "aws_vpc_availability_zone" {
+ description = "Target AWS VPC Availability Zone (the AWS Region will be calculated from this value)"
+}
+
+variable "aws_vpc_subnet_id" {
+ description = "Enter existing/target VPC Subnet ID, or enter 'new' to create a VPC with a default VPC prefix range"
+}
+
+variable "sap_vm_provision_dns_root_domain" {
+ description = "Root Domain for Private DNS used with the Virtual Server"
+}
+
+variable "sap_vm_provision_bastion_os_image" {
+ description = "Bastion OS Image. This variable uses the locals mapping with regex of OS Images, and will alter bastion provisioning."
+}
+
+variable "sap_vm_provision_bastion_user" {
+ description = "OS User to create on Bastion host to avoid pass-through root user (e.g. bastionuser)"
+}
+
+variable "sap_vm_provision_bastion_ssh_port" {
+ type = number
+ description = "Bastion host SSH Port from IANA Dynamic Ports range (49152 to 65535)"
+
+ #validation {
+ # condition = var.sap_vm_provision_bastion_ssh_port > 49152 && var.sap_vm_provision_bastion_ssh_port < 65535
+ # error_message = "Bastion host SSH Port must fall within IANA Dynamic Ports range (49152 to 65535)."
+ #}
+}
+
+
+variable "map_os_image_regex" {
+ description = "Map of operating systems OS Image regex, to identify latest OS Image for the OS major.minor version"
+ type = map(any)
+
+ default = {
+
+ rhel-8-1 = "*RHEL-8.1*_HVM*x86_64*"
+
+ rhel-8-2 = "*RHEL-8.2*_HVM*x86_64*"
+
+ rhel-8-4 = "*RHEL-8.4*_HVM*x86_64*"
+
+ rhel-8-6 = "*RHEL-8.6*_HVM*x86_64*"
+
+ rhel-7-7-sap-ha = "*RHEL-SAP-7.7*"
+
+ rhel-7-9-sap-ha = "*RHEL-SAP-7.9*"
+
+ rhel-8-1-sap-ha = "*RHEL-SAP-8.1.0*"
+
+ rhel-8-2-sap-ha = "*RHEL-SAP-8.2.0*"
+
+ rhel-8-4-sap-ha = "*RHEL-SAP-8.4.0*"
+
+ rhel-8-6-sap-ha = "*RHEL-SAP-8.6.0*"
+
+ sles-15-2 = "*suse-sles-15-sp2-v202*-hvm-ssd-x86_64*"
+
+ sles-15-3 = "*suse-sles-15-sp3-v202*-hvm-ssd-x86_64*"
+
+ sles-15-4 = "*suse-sles-15-sp4-v202*-hvm-ssd-x86_64*"
+
+ sles-12-5-sap-ha = "*suse-sles-sap-12-sp5-v202*-hvm-ssd-x86_64*"
+
+ sles-15-1-sap-ha = "*suse-sles-sap-15-sp1-v202*-hvm-ssd-x86_64*"
+
+ sles-15-2-sap-ha = "*suse-sles-sap-15-sp2-v202*-hvm-ssd-x86_64*"
+
+ sles-15-3-sap-ha = "*suse-sles-sap-15-sp3-v202*-hvm-ssd-x86_64*"
+
+ sles-15-4-sap-ha = "*suse-sles-sap-15-sp4-v202*-hvm-ssd-x86_64*"
+
+ }
+}
+
+variable "sap_vm_provision_host_specification_plan" {
+ description = "Host specification plans are xsmall_256gb. This variable uses the locals mapping with a nested list of host specifications, and will alter host provisioning."
+}
+
+variable "sap_vm_provision_aws_ec2_vs_host_os_image" {
+ description = "Host OS Image. This variable uses the locals mapping with regex of OS Images, and will alter host provisioning."
+}
+
+variable "sap_software_download_directory" {
+ description = "Mount point for downloads of SAP Software"
+
+ validation {
+ error_message = "Directory must start with forward slash."
+ condition = can(regex("^/", var.sap_software_download_directory))
+ }
+
+}
+
+
+
+
+variable "sap_hana_install_instance_nr" {
+ description = "Ansible - SAP HANA install: Instance Number (e.g. 90)"
+
+ validation {
+ error_message = "Cannot use Instance Number 43 (HA port number) or 89 (Windows Remote Desktop Services)."
+ condition = !can(regex("(43|89)", var.sap_hana_install_instance_nr))
+ }
+
+}
+
+variable "sap_nwas_abap_ascs_instance_no" {
+ description = "Ansible - SAP NetWeaver AS (ABAP) - ABAP Central Services (ASCS) instance number"
+
+ validation {
+ error_message = "Cannot use Instance Number 43 (HA port number) or 89 (Windows Remote Desktop Services)."
+ condition = !can(regex("(43|89)", var.sap_nwas_abap_ascs_instance_no))
+ }
+
+}
+
+variable "sap_nwas_abap_pas_instance_no" {
+ description = "Ansible - SAP NetWeaver AS (ABAP) - Primary Application Server instance number"
+
+ validation {
+ error_message = "Cannot use Instance Number 43 (HA port number) or 89 (Windows Remote Desktop Services)."
+ condition = !can(regex("(43|89)", var.sap_nwas_abap_pas_instance_no))
+ }
+
+}
+
+
+variable "map_host_specifications" {
+ description = "Map of host specficiations for SAP HANA single node install"
+ type = map(any)
+
+
+ default = {
+
+ xsmall_256gb = {
+
+ hana-p = { // Hostname
+
+ sap_host_type = "hana_primary" # hana_primary, nwas_ascs, nwas_pas, nwas_aas
+ virtual_machine_profile = "r5.8xlarge"
+ disable_ip_anti_spoofing = false
+
+ storage_definition = [
+
+ {
+ name = "hana_data"
+ mountpoint = "/hana/data"
+ #disk_count = 1
+ disk_size = 384
+ #disk_type = gp3
+ #disk_iops =
+ filesystem_type = "xfs"
+ #lvm_lv_name =
+ #lvm_lv_stripes =
+ #lvm_lv_stripe_size =
+ #lvm_vg_name =
+ #lvm_vg_options =
+ #lvm_vg_physical_extent_size =
+ #lvm_pv_device =
+ #lvm_pv_options =
+ #nfs_path =
+ #nfs_server =
+ #nfs_filesystem_type =
+ #nfs_mount_options =
+ },
+ {
+ name = "hana_log"
+ mountpoint = "/hana/log"
+ disk_size = 384
+ filesystem_type = "xfs"
+ },
+ {
+ name = "hana_shared"
+ mountpoint = "/hana/shared"
+ disk_size = 384
+ filesystem_type = "xfs"
+ },
+ {
+ name = "swap"
+ mountpoint = "/swapfile"
+ disk_size = 2
+ filesystem_type = "swap"
+ },
+ {
+ name = "software"
+ mountpoint = "/software"
+ disk_size = 100
+ filesystem_type = "xfs"
+ }
+
+ ]
+
+ }
+
+ }
+
+ }
+
+}
diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/aws_ec2_vs/tf_template/tf_template_outputs.tf b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/aws_ec2_vs/tf_template/tf_template_outputs.tf
new file mode 100644
index 0000000..f5e29ba
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/aws_ec2_vs/tf_template/tf_template_outputs.tf
@@ -0,0 +1,61 @@
+
+output "sap_host_list" {
+ value = [
+ for key in module.run_host_provision_module: {
+ "output_host_name" : key.output_host_name ,
+ "output_host_ip" : key.output_host_private_ip ,
+ "output_host_os_user" : "root" ,
+ "output_ansible_inventory_group" : var.map_host_specifications[var.sap_vm_provision_host_specification_plan][key.output_host_name].sap_host_type
+# "output_ansible_inventory_group" : can(regex("^hana.*",key.output_host_name)) ? "hana_primary" : can(regex("^nw.*",key.output_host_name)) ? can(regex(".*ascs.*",key.output_host_name)) ? "nwas_ascs" : can(regex(".*pas.*",key.output_host_name)) ? "nwas_pas" : can(regex(".*aas.*",key.output_host_name)) ? "nwas_aas" : "ERROR" : "ERROR"
+ }
+ ]
+}
+
+
+output "bastion_os_user" {
+ value = var.sap_vm_provision_bastion_user
+}
+
+output "sap_vm_provision_bastion_public_ip" {
+ value = module.run_bastion_inject_module.output_bastion_ip
+}
+
+output "bastion_port" {
+ value = var.sap_vm_provision_bastion_ssh_port
+}
+
+
+output "sap_vm_provision_nfs_mount_point" {
+ value = try("${module.run_host_nfs_module.output_nfs_fqdn}:/", "")
+}
+
+output "sap_vm_provision_nfs_mount_point_separate_sap_transport_dir" {
+ value = try("${module.run_host_nfs_module.output_nfs_fqdn}:/", "")
+}
+
+output "sap_vm_provision_nfs_mount_point_type" {
+ value = "nfs4"
+}
+
+output "sap_vm_provision_nfs_mount_point_opts" {
+ value = "nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport,acl"
+}
+
+
+##############################################################
+# Export SSH key to file on local
+##############################################################
+
+# Use path object to store key files temporarily in root of execution - https://www.terraform.io/docs/language/expressions/references.html#filesystem-and-workspace-info
+resource "local_file" "bastion_rsa" {
+ content = module.run_account_bootstrap_module.output_bastion_private_ssh_key
+ filename = "${path.root}/ssh/bastion_rsa"
+ file_permission = "0400"
+}
+
+# Use path object to store key files temporarily in root of execution - https://www.terraform.io/docs/language/expressions/references.html#filesystem-and-workspace-info
+resource "local_file" "hosts_rsa" {
+ content = module.run_account_bootstrap_module.output_host_private_ssh_key
+ filename = "${path.root}/ssh/hosts_rsa"
+ file_permission = "0400"
+}
diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/gcp_ce_vm/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/gcp_ce_vm/execute_main.yml
new file mode 100644
index 0000000..30d716b
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/gcp_ce_vm/execute_main.yml
@@ -0,0 +1,180 @@
+---
+
+- name: Ansible Task block for Terraform apply of multiple Terraform Modules
+ block:
+
+ # Do not use ansible.builtin.copy as this will cause error 'not writable' on localhost (even if user has permissions)
+ - name: Copy Terraform Template files to temporary directory in current Ansible Playbook directory
+ ansible.builtin.shell: |
+ mkdir -p {{ sap_vm_provision_terraform_work_dir_path }}
+ cp -r {{ role_path }}/tasks/platform_ansible_to_terraform/{{ sap_vm_provision_iac_platform }}/tf_template/* {{ sap_vm_provision_terraform_work_dir_path }}
+
+ - name: Terraform Template for SAP - Google Cloud
+ register: terraform_template1_result
+ cloud.terraform.terraform:
+ project_path: "{{ sap_vm_provision_terraform_work_dir_path }}"
+ state: "{{ sap_vm_provision_terraform_state }}"
+ force_init: true
+ complex_vars: true
+ variables:
+ gcp_credentials_json: "{{ sap_vm_provision_gcp_credentials_json }}"
+ gcp_project: "{{ sap_vm_provision_gcp_project }}"
+ gcp_region_zone: "{{ sap_vm_provision_gcp_region_zone }}"
+ gcp_vpc_subnet_name: "{{ sap_vm_provision_gcp_vpc_subnet_name }}"
+ sap_vm_provision_resource_prefix: "{{ sap_vm_provision_resource_prefix }}"
+ sap_vm_provision_dns_root_domain: "{{ sap_vm_provision_dns_root_domain }}"
+ sap_vm_provision_bastion_os_image: "{{ sap_vm_provision_bastion_os_image }}"
+ sap_vm_provision_bastion_user: "{{ sap_vm_provision_bastion_user }}"
+ sap_vm_provision_bastion_ssh_port: "{{ sap_vm_provision_bastion_ssh_port }}"
+ map_os_image_regex: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_os_image_dictionary') }}"
+ map_host_specifications: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary') }}"
+ sap_vm_provision_host_specification_plan: "{{ sap_vm_provision_host_specification_plan }}"
+ sap_vm_provision_gcp_ce_vm_host_os_image: "{{ sap_vm_provision_gcp_ce_vm_host_os_image }}"
+ sap_software_download_directory: "{{ sap_software_download_directory }}"
+ sap_hana_install_instance_nr: "{{ sap_hana_install_instance_nr | default('') }}"
+ sap_nwas_abap_ascs_instance_no: "{{ sap_swpm_ascs_instance_nr | default('') }}"
+ sap_nwas_abap_pas_instance_no: "{{ sap_swpm_pas_instance_nr | default('') }}"
+
+ - name: Terraform Template output
+ ansible.builtin.debug:
+ var: terraform_template1_result
+
+
+ # - name: Execute Ansible Role cloud.terraform.inventory_from_outputs
+ # register: terraform_output_to_ansible_inventory
+ # ansible.builtin.include_role:
+ # name: cloud.terraform.inventory_from_outputs
+ # vars:
+ # project_path: "{{ sap_vm_provision_terraform_work_dir_path }}"
+ # mapping_variables:
+ # host_list: sap_host_list
+ # name: output_host_name
+ # ip: output_host_ip
+ # user: output_host_os_user
+ # group: output_ansible_inventory_group
+
+
+ - name: Read outputs from project path
+ when: sap_vm_provision_terraform_state == "present"
+ cloud.terraform.terraform_output:
+ project_path: "{{ sap_vm_provision_terraform_work_dir_path }}"
+ register: terraform_output_project_path
+
+ - name: Add hosts from terraform_output to the group defined in terraform_output
+ when: sap_vm_provision_terraform_state == "present"
+ register: terraform_add_hosts
+ ansible.builtin.add_host:
+ name: "{{ item['output_host_name'] }}"
+ groups: "{{ item['output_ansible_inventory_group'] }}"
+ ansible_host: "{{ item['output_host_ip'] }}"
+ ansible_user: "{{ item['output_host_os_user'] }}"
+ ansible_ssh_private_key_file: "{{ sap_vm_provision_terraform_work_dir_path }}/ssh/hosts_rsa"
+ ansible_ssh_common_args: -o ConnectTimeout=180 -o ControlMaster=auto -o ControlPersist=3600s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no -o ProxyCommand='ssh -W %h:%p {{ terraform_output.outputs['bastion_os_user'].value }}@{{ terraform_output.outputs['sap_vm_provision_bastion_public_ip'].value }} -p {{ terraform_output.outputs['bastion_port'].value }} -i {{ sap_vm_provision_terraform_work_dir_path }}/ssh/bastion_rsa -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
+ loop: "{{ terraform_output.outputs['sap_host_list'].value }}"
+ vars:
+ # even skipped tasks register variables, so we need to choose one explicitly
+ terraform_output: "{{ (terraform_output_project_path is defined and terraform_output_project_path is success) |
+ ternary(terraform_output_project_path, terraform_output_state_file) }}"
+
+
+# Cannot override any variables from extravars input, see https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#understanding-variable-precedence
+# Ensure no default value exists for any prompted variable before execution of Ansible Playbook
+
+ - name: Set fact to hold all inventory hosts in all groups
+ ansible.builtin.set_fact:
+ groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] , [ groups['anydb_primary'] | default([]) ] , [ groups['anydb_secondary'] | default([]) ] ] | flatten | select() }}"
+
+ - name: Set facts for all hosts - use facts from localhost for NFS
+ when: sap_vm_provision_terraform_state == "present"
+ ansible.builtin.set_fact:
+ sap_vm_provision_nfs_mount_point: "{{ terraform_output.outputs['sap_vm_provision_nfs_mount_point'].value | default('') }}"
+ sap_vm_provision_nfs_mount_point_separate_sap_transport_dir: "{{ terraform_output.outputs['sap_vm_provision_nfs_mount_point_separate_sap_transport_dir'].value | default('') }}"
+ sap_vm_provision_nfs_mount_point_type: "{{ terraform_output.outputs['sap_vm_provision_nfs_mount_point_type'].value | default('') }}"
+ sap_vm_provision_nfs_mount_point_opts: "{{ terraform_output.outputs['sap_vm_provision_nfs_mount_point_opts'].value | default('') }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ loop: "{{ groups_merged_list }}"
+ vars:
+ # even skipped tasks register variables, so we need to choose one explicitly
+ terraform_output: "{{ (terraform_output_project_path is defined and terraform_output_project_path is success) |
+ ternary(terraform_output_project_path, terraform_output_state_file) }}"
+
+ - name: Set facts for all hosts - use facts from localhost for host specification dictionary
+ when: sap_vm_provision_terraform_state == "present"
+ ansible.builtin.set_fact:
+ host_specifications_dictionary: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary') }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ loop: "{{ groups_merged_list }}"
+ vars:
+ # even skipped tasks register variables, so we need to choose one explicitly
+ terraform_output: "{{ (terraform_output_project_path is defined and terraform_output_project_path is success) |
+ ternary(terraform_output_project_path, terraform_output_state_file) }}"
+
+ - name: Set Ansible Vars
+ register: register_set_ansible_vars
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars.yml
+ args:
+ apply:
+ delegate_to: "{{ item }}"
+ run_once: true # Otherwise tasks will run twice per host
+ loop: "{{ groups_merged_list }}"
+
+ # Required to collect the remote host's facts for further processing
+ # in the following steps
+ - name: Gather host facts
+ ansible.builtin.setup:
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ loop: "{{ groups_merged_list }}"
+
+ # Must be set to short hostname,
+ # so that command 'hostname' and 'hostname -s' return the short hostname only;
+ # otherwise may cause error with SAP SWPM using name.domain.com.domain.com
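+  # (e.g. an FQDN hostname 'host1.example.com' could be expanded by SWPM to 'host1.example.com.example.com')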
+ - name: Change system hostname (must be set to short name and not FQDN, as required by SAP)
+ ansible.builtin.hostname:
+ name: "{{ inventory_hostname_short }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ loop: "{{ groups_merged_list }}"
+
+ - name: Set /etc/hosts
+ register: register_etc_hosts_file
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts.yml
+ args:
+ apply:
+ delegate_to: "{{ item }}"
+ run_once: true # Otherwise tasks will run twice per host
+ loop: "{{ groups_merged_list }}"
+
+ - name: Set /etc/hosts for HA
+ register: register_etc_hosts_file_ha
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_ha.yml
+ when:
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+ args:
+ apply:
+ delegate_to: "{{ item }}"
+ run_once: true # Otherwise tasks will run twice per host
+ loop: "{{ groups_merged_list }}"
+
+ - name: Set /etc/hosts for Scale-Out
+ register: register_etc_hosts_file_scaleout
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_scaleout.yml
+ when:
+ - (groups["hana_primary"] is defined and (groups["hana_primary"] | length>0)) and (sap_hana_scaleout_active_coordinator is defined or sap_hana_scaleout_active_worker is defined or sap_hana_scaleout_standby is defined)
+ args:
+ apply:
+ delegate_to: "{{ item }}"
+ run_once: true # Otherwise tasks will run twice per host
+ loop: "{{ groups_merged_list }}"
+
+ - name: Set vars for sap_storage_setup Ansible Role
+ when: sap_vm_provision_terraform_state == "present"
+ register: register_ansible_vars_storage
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars_storage.yml
diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/gcp_ce_vm/tf_template/tf_template.tf b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/gcp_ce_vm/tf_template/tf_template.tf
new file mode 100644
index 0000000..e9ca9a7
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/gcp_ce_vm/tf_template/tf_template.tf
@@ -0,0 +1,201 @@
+
+# Terraform declaration
+
+terraform {
+ required_version = ">= 1.0, <= 1.5.5"
+ required_providers {
+ google = {
+ #source = "localdomain/provider/google" // Local, on macOS path to place files would be $HOME/.terraform.d/plugins/localdomain/provider/google/1.xx.xx/darwin_amd6
+ source = "hashicorp/google" // Terraform Registry
+ version = ">=4.50.0"
+ }
+ }
+}
+
+# Terraform Provider declaration
+#
+# Nested provider configurations cannot be used with depends_on meta-argument between modules
+#
+# The calling module block can use either:
+# - "providers" argument in the module block
+# - none, inherit default (un-aliased) provider configuration
+#
+# Therefore the below is blank and is only for reference if this module needs to be executed manually
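+#
+# Illustrative sketch only (assumption, not used by this template) of passing
+# an aliased provider from a calling module block:
+#
+#   module "example" {
+#     source    = "./modules/example"
+#     providers = { google = google.some_alias }
+#   }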
+
+
+# Terraform Provider declaration
+
+provider "google" {
+ project = var.gcp_project
+ region = local.gcp_region
+ zone = var.gcp_region_zone
+
+ credentials = var.gcp_credentials_json
+
+}
+
+
+module "run_account_init_module" {
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//gcp_ce_vm/account_init?ref=main"
+
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+
+ module_var_gcp_region = local.gcp_region
+ module_var_gcp_vpc_subnet_create_boolean = local.gcp_vpc_subnet_create_boolean
+ module_var_gcp_vpc_subnet_name = local.gcp_vpc_subnet_create_boolean ? 0 : var.gcp_vpc_subnet_name
+
+}
+
+
+module "run_account_bootstrap_module" {
+
+ depends_on = [
+ module.run_account_init_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//gcp_ce_vm/account_bootstrap?ref=main"
+
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+
+ module_var_gcp_vpc_subnet_name = module.run_account_init_module.output_vpc_subnet_name
+
+ module_var_dns_root_domain_name = var.sap_vm_provision_dns_root_domain
+
+}
+
+
+module "run_bastion_inject_module" {
+
+ depends_on = [
+ module.run_account_bootstrap_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//gcp_ce_vm/bastion_inject?ref=main"
+
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+
+ module_var_gcp_region = local.gcp_region
+ module_var_gcp_region_zone = var.gcp_region_zone
+ module_var_gcp_vpc_subnet_name = module.run_account_init_module.output_vpc_subnet_name
+
+ module_var_bastion_user = var.sap_vm_provision_bastion_user
+ module_var_bastion_ssh_port = var.sap_vm_provision_bastion_ssh_port
+ module_var_bastion_os_image = var.map_os_image_regex[var.sap_vm_provision_bastion_os_image]
+
+ module_var_bastion_private_ssh_key = module.run_account_bootstrap_module.output_bastion_private_ssh_key
+ module_var_bastion_public_ssh_key = module.run_account_bootstrap_module.output_bastion_public_ssh_key
+
+}
+
+
+module "run_host_network_access_sap_module" {
+
+ depends_on = [
+ module.run_bastion_inject_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//gcp_ce_vm/host_network_access_sap?ref=main"
+
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+
+ module_var_gcp_vpc_subnet_name = module.run_account_init_module.output_vpc_subnet_name
+
+ module_var_sap_nwas_abap_ascs_instance_no = var.sap_nwas_abap_ascs_instance_no
+ module_var_sap_nwas_abap_pas_instance_no = var.sap_nwas_abap_pas_instance_no
+ module_var_sap_hana_instance_no = var.sap_hana_install_instance_nr
+
+}
+
+
+module "run_host_network_access_sap_public_via_proxy_module" {
+
+ depends_on = [
+ module.run_bastion_inject_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//gcp_ce_vm/host_network_access_sap_public_via_proxy?ref=main"
+
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+
+ module_var_gcp_vpc_subnet_name = module.run_account_init_module.output_vpc_subnet_name
+
+ module_var_sap_hana_instance_no = var.sap_hana_install_instance_nr
+ module_var_sap_nwas_abap_pas_instance_no = var.sap_nwas_abap_pas_instance_no
+
+ module_var_bastion_subnet_name = module.run_bastion_inject_module.output_bastion_subnet_name
+
+}
+
+
+module "run_host_nfs_module" {
+
+ depends_on = [
+ module.run_bastion_inject_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//gcp_ce_vm/host_nfs?ref=main"
+
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+
+ module_var_gcp_region_zone = var.gcp_region_zone
+ module_var_gcp_vpc_subnet_name = module.run_account_init_module.output_vpc_subnet_name
+
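+  # Evaluates to true when any host's storage_definition contains an nfs_path entry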
+ module_var_nfs_boolean_sapmnt = sum(flatten(
+ [
+ for host in var.map_host_specifications[var.sap_vm_provision_host_specification_plan] :
+ [ for storage_item in host["storage_definition"] : try(storage_item.nfs_path,"ignore") != "ignore" ? 1 : 0 ]
+    ] )) > 0 ? true : false
+
+}
+
+
+module "run_host_provision_module" {
+
+ depends_on = [
+ module.run_account_init_module,
+ module.run_account_bootstrap_module,
+ module.run_bastion_inject_module,
+ module.run_host_nfs_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//gcp_ce_vm/host_provision?ref=main"
+
+ # Set Terraform Module Variables using Terraform Variables at runtime
+
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+
+ module_var_gcp_region_zone = var.gcp_region_zone
+ module_var_gcp_vpc_subnet_name = module.run_account_init_module.output_vpc_subnet_name
+
+ module_var_dns_root_domain_name = var.sap_vm_provision_dns_root_domain
+ module_var_dns_zone_name = module.run_account_bootstrap_module.output_dns_zone_name
+
+ module_var_host_os_image = var.map_os_image_regex[var.sap_vm_provision_gcp_ce_vm_host_os_image]
+
+ module_var_bastion_ssh_port = var.sap_vm_provision_bastion_ssh_port
+ module_var_bastion_user = var.sap_vm_provision_bastion_user
+ module_var_bastion_private_ssh_key = module.run_account_bootstrap_module.output_bastion_private_ssh_key
+ module_var_bastion_ip = module.run_bastion_inject_module.output_bastion_ip
+
+ module_var_host_ssh_public_key = module.run_account_bootstrap_module.output_host_public_ssh_key
+ module_var_host_ssh_private_key = module.run_account_bootstrap_module.output_host_private_ssh_key
+
+
+
+ # Set Terraform Module Variables using for_each loop on a map Terraform Variable with nested objects
+
+ for_each = toset([
+ for key, value in var.map_host_specifications[var.sap_vm_provision_host_specification_plan] : key
+ ])
+
+ module_var_virtual_machine_hostname = each.key
+
+ module_var_virtual_machine_profile = var.map_host_specifications[var.sap_vm_provision_host_specification_plan][each.key].virtual_machine_profile
+ module_var_disable_ip_anti_spoofing = var.map_host_specifications[var.sap_vm_provision_host_specification_plan][each.key].disable_ip_anti_spoofing
+
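+  # Pass only disk-backed volumes: entries that define disk_size and are not swap (no swap_path set)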
+ module_var_storage_definition = [ for storage_item in var.map_host_specifications[var.sap_vm_provision_host_specification_plan][each.key]["storage_definition"] : storage_item if contains(keys(storage_item),"disk_size") && try(storage_item.swap_path,"") == "" ]
+
+}
+
+
diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/gcp_ce_vm/tf_template/tf_template_input_vars.tf b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/gcp_ce_vm/tf_template/tf_template_input_vars.tf
new file mode 100644
index 0000000..e1344b1
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/gcp_ce_vm/tf_template/tf_template_input_vars.tf
@@ -0,0 +1,267 @@
+
+locals {
+
+ gcp_vpc_subnet_create_boolean = var.gcp_vpc_subnet_name == "new" ? true : false
+
+ gcp_region = replace(var.gcp_region_zone, "/-[^-]*$/", "")
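+  # e.g. "europe-west9-a" becomes "europe-west9"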
+
+ # Directories start with "C:..." on Windows; All other OSs use "/" for root.
+ detect_windows = substr(pathexpand("~"), 0, 1) == "/" ? false : true
+ detect_shell = substr(pathexpand("~"), 0, 1) == "/" ? true : false
+
+ # Used for displaying Shell ssh connection output
+  # If /proc/version contains the "WSL" substring, Terraform is running under Windows Subsystem for Linux
+ not_wsl = fileexists("/proc/version") ? length(regexall("WSL", file("/proc/version"))) > 0 ? false : true : true
+
+ # Used for displaying Windows PowerShell ssh connection output
+  # If /proc/version contains the "WSL" substring, Terraform is running under Windows Subsystem for Linux
+ is_wsl = fileexists("/proc/version") ? length(regexall("WSL", file("/proc/version"))) > 0 ? true : false : false
+
+}
+
+
+variable "gcp_project" {
+ description = "Target GCP Project ID"
+}
+
+variable "gcp_region_zone" {
+ description = "Target GCP Zone, the GCP Region will be calculated from this value (e.g. europe-west9-a)"
+}
+
+variable "gcp_credentials_json" {
+ description = "Enter path to GCP Key File for Service Account (or Google Application Default Credentials JSON file for GCloud CLI)"
+}
+
+variable "gcp_vpc_subnet_name" {
+ description = "Enter existing/target VPC Subnet name, or enter 'new' to create a VPC"
+}
+
+variable "sap_vm_provision_resource_prefix" {
+ description = "Prefix to resource names"
+}
+
+variable "sap_vm_provision_dns_root_domain" {
+ description = "Root Domain for Private DNS used with the Virtual Machine"
+}
+
+variable "sap_vm_provision_bastion_os_image" {
+ description = "Bastion OS Image. This variable uses the locals mapping with regex of OS Images, and will alter bastion provisioning."
+}
+
+variable "sap_vm_provision_bastion_user" {
+ description = "OS User to create on Bastion host to avoid pass-through root user (e.g. bastionuser)"
+}
+
+variable "sap_vm_provision_bastion_ssh_port" {
+ type = number
+ description = "Bastion host SSH Port from IANA Dynamic Ports range (49152 to 65535)"
+
+ validation {
+    condition = var.sap_vm_provision_bastion_ssh_port >= 49152 && var.sap_vm_provision_bastion_ssh_port <= 65535
+ error_message = "Bastion host SSH Port must fall within IANA Dynamic Ports range (49152 to 65535)."
+ }
+}
+
+variable "sap_vm_provision_host_specification_plan" {
+ description = "Host specification plans are xsmall_256gb. This variable uses the locals mapping with a nested list of host specifications, and will alter host provisioning."
+}
+
+variable "sap_vm_provision_gcp_ce_vm_host_os_image" {
+ description = "Host OS Image. This variable uses the locals mapping with regex of OS Images, and will alter host provisioning."
+}
+
+variable "sap_software_download_directory" {
+ description = "Mount point for downloads of SAP Software"
+
+ validation {
+ error_message = "Directory must start with forward slash."
+ condition = can(regex("^/", var.sap_software_download_directory))
+ }
+
+}
+
+
+
+variable "sap_hana_install_instance_nr" {
+ description = "Ansible - SAP HANA install: Instance Number (e.g. 90)"
+
+ validation {
+ error_message = "Cannot use Instance Number 43 (HA port number) or 89 (Windows Remote Desktop Services)."
+ condition = !can(regex("(43|89)", var.sap_hana_install_instance_nr))
+ }
+
+}
+
+variable "sap_nwas_abap_ascs_instance_no" {
+ description = "Ansible - SAP NetWeaver AS (ABAP) - ABAP Central Services (ASCS) instance number"
+
+ validation {
+ error_message = "Cannot use Instance Number 43 (HA port number) or 89 (Windows Remote Desktop Services)."
+ condition = !can(regex("(43|89)", var.sap_nwas_abap_ascs_instance_no))
+ }
+
+}
+
+variable "sap_nwas_abap_pas_instance_no" {
+ description = "Ansible - SAP NetWeaver AS (ABAP) - Primary Application Server instance number"
+
+ validation {
+ error_message = "Cannot use Instance Number 43 (HA port number) or 89 (Windows Remote Desktop Services)."
+ condition = !can(regex("(43|89)", var.sap_nwas_abap_pas_instance_no))
+ }
+
+}
+
+
+# There is no Terraform Resource for data lookup of all GCP OS Images, therefore this input does not use wildcards
+variable "map_os_image_regex" {
+
+ description = "Map of operating systems OS Image, static OS Image names, to identify latest OS Image for the OS major.minor version"
+
+ type = map(any)
+
+ default = {
+
+ rhel-8-latest = {
+ project = "rhel-cloud"
+ family = "rhel-8"
+ },
+
+ rhel-7-7-sap-ha = {
+ project = "rhel-sap-cloud"
+ family = "rhel-7-7-sap-ha"
+ },
+
+ rhel-7-9-sap-ha = {
+ project = "rhel-sap-cloud"
+ family = "rhel-7-9-sap-ha"
+ },
+
+ rhel-8-1-sap-ha = {
+ project = "rhel-sap-cloud"
+ family = "rhel-8-1-sap-ha"
+ },
+
+ rhel-8-2-sap-ha = {
+ project = "rhel-sap-cloud"
+ family = "rhel-8-2-sap-ha"
+ },
+
+ rhel-8-4-sap-ha = {
+ project = "rhel-sap-cloud"
+ family = "rhel-8-4-sap-ha"
+ },
+
+ rhel-8-6-sap-ha = {
+ project = "rhel-sap-cloud"
+ family = "rhel-8-6-sap-ha"
+ },
+
+ sles-15-latest = {
+ project = "suse-cloud"
+ family = "sles-15"
+ },
+
+ sles-12-sp5-sap = {
+ project = "suse-sap-cloud"
+ family = "sles-12-sp5-sap"
+ },
+
+ sles-15-sp1-sap = {
+ project = "suse-sap-cloud"
+ family = "sles-15-sp1-sap"
+ },
+
+ sles-15-sp2-sap = {
+ project = "suse-sap-cloud"
+ family = "sles-15-sp2-sap"
+ },
+
+ sles-15-sp3-sap = {
+ project = "suse-sap-cloud"
+ family = "sles-15-sp3-sap"
+ },
+
+ sles-15-sp4-sap = {
+ project = "suse-sap-cloud"
+ family = "sles-15-sp4-sap"
+ },
+
+ }
+
+}
+
+variable "map_host_specifications" {
+ description = "Map of host specficiations for SAP HANA single node install"
+ type = map(any)
+
+
+ default = {
+
+ xsmall_256gb = {
+
+ hana-p = { // Hostname
+
+ sap_host_type = "hana_primary" # hana_primary, nwas_ascs, nwas_pas, nwas_aas
+ virtual_machine_profile = "n2-highmem-32" // 32 vCPU, 256GB Memory
+ disable_ip_anti_spoofing = false
+
+ storage_definition = [
+
+ {
+ name = "hana_data"
+ mountpoint = "/hana/data"
+ #disk_count = 1
+ disk_size = 384
+ disk_type = "pd-ssd"
+ #disk_iops =
+ filesystem_type = "xfs"
+ #lvm_lv_name =
+ #lvm_lv_stripes =
+ #lvm_lv_stripe_size =
+ #lvm_vg_name =
+ #lvm_vg_options =
+ #lvm_vg_physical_extent_size =
+ #lvm_pv_device =
+ #lvm_pv_options =
+ #nfs_path =
+ #nfs_server =
+ #nfs_filesystem_type =
+ #nfs_mount_options =
+ },
+ {
+ name = "hana_log"
+ mountpoint = "/hana/log"
+ disk_size = 128
+ disk_type = "pd-ssd"
+ filesystem_type = "xfs"
+ },
+ {
+ name = "hana_shared"
+ mountpoint = "/hana/shared"
+ disk_size = 320
+ disk_type = "pd-balanced"
+ filesystem_type = "xfs"
+ },
+ {
+ name = "swap"
+ mountpoint = "/swapfile"
+ disk_size = 2
+ filesystem_type = "swap"
+ },
+ {
+ name = "software"
+ mountpoint = "/software"
+ disk_size = 100
+ filesystem_type = "xfs"
+ }
+
+ ]
+
+ }
+
+ }
+
+ }
+
+}
diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/gcp_ce_vm/tf_template/tf_template_outputs.tf b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/gcp_ce_vm/tf_template/tf_template_outputs.tf
new file mode 100644
index 0000000..4529593
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/gcp_ce_vm/tf_template/tf_template_outputs.tf
@@ -0,0 +1,61 @@
+
+output "sap_host_list" {
+ value = [
+ for key in module.run_host_provision_module: {
+ "output_host_name" : key.output_host_name ,
+ "output_host_ip" : key.output_host_private_ip ,
+ "output_host_os_user" : "root" ,
+ "output_ansible_inventory_group" : var.map_host_specifications[var.sap_vm_provision_host_specification_plan][key.output_host_name].sap_host_type
+# "output_ansible_inventory_group" : can(regex("^hana.*",key.output_host_name)) ? "hana_primary" : can(regex("^nw.*",key.output_host_name)) ? can(regex(".*ascs.*",key.output_host_name)) ? "nwas_ascs" : can(regex(".*pas.*",key.output_host_name)) ? "nwas_pas" : can(regex(".*aas.*",key.output_host_name)) ? "nwas_aas" : "ERROR" : "ERROR"
+ }
+ ]
+}
+
+
+output "bastion_os_user" {
+ value = var.sap_vm_provision_bastion_user
+}
+
+output "sap_vm_provision_bastion_public_ip" {
+ value = module.run_bastion_inject_module.output_bastion_ip
+}
+
+output "bastion_port" {
+ value = var.sap_vm_provision_bastion_ssh_port
+}
+
+
+output "sap_vm_provision_nfs_mount_point" {
+ value = try("${module.run_host_nfs_module.output_nfs_fqdn}", "")
+}
+
+output "sap_vm_provision_nfs_mount_point_separate_sap_transport_dir" {
+ value = try("${module.run_host_nfs_module.output_nfs_fqdn}", "")
+}
+
+output "sap_vm_provision_nfs_mount_point_type" {
+ value = "nfs3"
+}
+
+output "sap_vm_provision_nfs_mount_point_opts" {
+ value = "nfsvers=3,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=3,resvport,_netdev,rw,intr"
+}
+
+
+##############################################################
+# Export SSH key to file on local
+##############################################################
+
+# Use path object to store key files temporarily in root of execution - https://www.terraform.io/docs/language/expressions/references.html#filesystem-and-workspace-info
+resource "local_file" "bastion_rsa" {
+ content = module.run_account_bootstrap_module.output_bastion_private_ssh_key
+ filename = "${path.root}/ssh/bastion_rsa"
+ file_permission = "0400"
+}
+
+# Use path object to store key files temporarily in root of execution - https://www.terraform.io/docs/language/expressions/references.html#filesystem-and-workspace-info
+resource "local_file" "hosts_rsa" {
+ content = module.run_account_bootstrap_module.output_host_private_ssh_key
+ filename = "${path.root}/ssh/hosts_rsa"
+ file_permission = "0400"
+}
diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_powervs/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_powervs/execute_main.yml
new file mode 100644
index 0000000..1789ca8
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_powervs/execute_main.yml
@@ -0,0 +1,180 @@
+---
+
+- name: Ansible Task block for Terraform apply of multiple Terraform Modules
+ block:
+
+ # Do not use ansible.builtin.copy as this will cause error 'not writable' on localhost (even if user has permissions)
+ - name: Copy Terraform Template files to temporary directory in current Ansible Playbook directory
+ ansible.builtin.shell: |
+ mkdir -p {{ sap_vm_provision_terraform_work_dir_path }}
+ cp -r {{ role_path }}/tasks/platform_ansible_to_terraform/{{ sap_vm_provision_iac_platform }}/tf_template/* {{ sap_vm_provision_terraform_work_dir_path }}
+
+ - name: Terraform Template for SAP - IBM Cloud
+ register: terraform_template1_result
+ cloud.terraform.terraform:
+ project_path: "{{ sap_vm_provision_terraform_work_dir_path }}"
+ state: "{{ sap_vm_provision_terraform_state }}"
+ force_init: true
+ complex_vars: true
+ variables:
+ ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}"
+ ibmcloud_resource_group: "{{ sap_vm_provision_ibmcloud_resource_group_name }}"
+ ibmcloud_vpc_availability_zone: "{{ sap_vm_provision_ibmcloud_availability_zone }}"
+ ibmcloud_vpc_subnet_name: "{{ sap_vm_provision_ibmcloud_vpc_subnet_name }}"
+ sap_vm_provision_resource_prefix: "{{ sap_vm_provision_resource_prefix }}"
+ sap_vm_provision_dns_root_domain: "{{ sap_vm_provision_dns_root_domain }}"
+ sap_vm_provision_bastion_os_image: "{{ sap_vm_provision_bastion_os_image }}"
+ sap_vm_provision_bastion_user: "{{ sap_vm_provision_bastion_user }}"
+ sap_vm_provision_bastion_ssh_port: "{{ sap_vm_provision_bastion_ssh_port }}"
+ map_os_image_regex: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_os_image_dictionary') }}"
+ map_host_specifications: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary') }}"
+ sap_vm_provision_host_specification_plan: "{{ sap_vm_provision_host_specification_plan }}"
+ sap_vm_provision_ibmcloud_vs_host_os_image: "{{ sap_vm_provision_ibmcloud_vs_host_os_image }}"
+ sap_software_download_directory: "{{ sap_software_download_directory }}"
+ sap_hana_install_instance_nr: "{{ sap_hana_install_instance_nr | default('') }}"
+ sap_nwas_abap_ascs_instance_no: "{{ sap_swpm_ascs_instance_nr | default('') }}"
+ sap_nwas_abap_pas_instance_no: "{{ sap_swpm_pas_instance_nr | default('') }}"
+
+ - name: Terraform Template output
+ ansible.builtin.debug:
+ var: terraform_template1_result
+
+
+ # - name: Execute Ansible Role cloud.terraform.inventory_from_outputs
+ # register: terraform_output_to_ansible_inventory
+ # ansible.builtin.include_role:
+ # name: cloud.terraform.inventory_from_outputs
+ # vars:
+ # project_path: "{{ sap_vm_provision_terraform_work_dir_path }}"
+ # mapping_variables:
+ # host_list: sap_host_list
+ # name: output_host_name
+ # ip: output_host_ip
+ # user: output_host_os_user
+ # group: output_ansible_inventory_group
+
+
+ - name: Read outputs from project path
+ when: sap_vm_provision_terraform_state == "present"
+ cloud.terraform.terraform_output:
+ project_path: "{{ sap_vm_provision_terraform_work_dir_path }}"
+ register: terraform_output_project_path
+
+ - name: Add hosts from terraform_output to the group defined in terraform_output
+ when: sap_vm_provision_terraform_state == "present"
+ register: terraform_add_hosts
+ ansible.builtin.add_host:
+ name: "{{ item['output_host_name'] }}"
+ groups: "{{ item['output_ansible_inventory_group'] }}"
+ ansible_host: "{{ item['output_host_ip'] }}"
+ ansible_user: "{{ item['output_host_os_user'] }}"
+ ansible_ssh_private_key_file: "{{ sap_vm_provision_terraform_work_dir_path }}/ssh/hosts_rsa"
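+      # Connect to each provisioned host through the bastion via SSH ProxyCommand, using the SSH keys exported by the Terraform Template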
+ ansible_ssh_common_args: -o ConnectTimeout=180 -o ControlMaster=auto -o ControlPersist=3600s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no -o ProxyCommand='ssh -W %h:%p {{ terraform_output.outputs['bastion_os_user'].value }}@{{ terraform_output.outputs['sap_vm_provision_bastion_public_ip'].value }} -p {{ terraform_output.outputs['bastion_port'].value }} -i {{ sap_vm_provision_terraform_work_dir_path }}/ssh/bastion_rsa -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
+ loop: "{{ terraform_output.outputs['sap_host_list'].value }}"
+ vars:
+ # even skipped tasks register variables, so we need to choose one explicitly
+ terraform_output: "{{ (terraform_output_project_path is defined and terraform_output_project_path is success) |
+ ternary(terraform_output_project_path, terraform_output_state_file) }}"
+
+
+# Cannot override any variables from extravars input, see https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#understanding-variable-precedence
+# Ensure no default value exists for any prompted variable before execution of Ansible Playbook
+
+ - name: Set fact to hold all inventory hosts in all groups
+ ansible.builtin.set_fact:
+ groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] , [ groups['anydb_primary'] | default([]) ] , [ groups['anydb_secondary'] | default([]) ] ] | flatten | select() }}"
+
+ - name: Set facts for all hosts - use facts from localhost for NFS
+ when: sap_vm_provision_terraform_state == "present"
+ ansible.builtin.set_fact:
+ sap_vm_provision_nfs_mount_point: "{{ terraform_output.outputs['sap_vm_provision_nfs_mount_point'].value | default('') }}"
+ sap_vm_provision_nfs_mount_point_separate_sap_transport_dir: "{{ terraform_output.outputs['sap_vm_provision_nfs_mount_point_separate_sap_transport_dir'].value | default('') }}"
+ sap_vm_provision_nfs_mount_point_type: "{{ terraform_output.outputs['sap_vm_provision_nfs_mount_point_type'].value | default('') }}"
+ sap_vm_provision_nfs_mount_point_opts: "{{ terraform_output.outputs['sap_vm_provision_nfs_mount_point_opts'].value | default('') }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ loop: "{{ groups_merged_list }}"
+ vars:
+ # even skipped tasks register variables, so we need to choose one explicitly
+ terraform_output: "{{ (terraform_output_project_path is defined and terraform_output_project_path is success) |
+ ternary(terraform_output_project_path, terraform_output_state_file) }}"
+
+ - name: Set facts for all hosts - use facts from localhost for host specification dictionary
+ when: sap_vm_provision_terraform_state == "present"
+ ansible.builtin.set_fact:
+ host_specifications_dictionary: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary') }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ loop: "{{ groups_merged_list }}"
+ vars:
+ # even skipped tasks register variables, so we need to choose one explicitly
+ terraform_output: "{{ (terraform_output_project_path is defined and terraform_output_project_path is success) |
+ ternary(terraform_output_project_path, terraform_output_state_file) }}"
+
+ - name: Set Ansible Vars
+ register: register_set_ansible_vars
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars.yml
+ args:
+ apply:
+ delegate_to: "{{ item }}"
+ run_once: true # Otherwise tasks will run twice per host
+ loop: "{{ groups_merged_list }}"
+
+ # Required to collect the remote host's facts for further processing
+ # in the following steps
+ - name: Gather host facts
+ ansible.builtin.setup:
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ loop: "{{ groups_merged_list }}"
+
+ # Must be set to short hostname,
+ # so that command 'hostname' and 'hostname -s' return the short hostname only;
+ # otherwise may cause error with SAP SWPM using name.domain.com.domain.com
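+  # (e.g. an FQDN hostname 'host1.example.com' could be expanded by SWPM to 'host1.example.com.example.com')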
+ - name: Change system hostname (must be set to short name and not FQDN, as required by SAP)
+ ansible.builtin.hostname:
+ name: "{{ inventory_hostname_short }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ loop: "{{ groups_merged_list }}"
+
+ - name: Set /etc/hosts
+ register: register_etc_hosts_file
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts.yml
+ args:
+ apply:
+ delegate_to: "{{ item }}"
+ run_once: true # Otherwise tasks will run twice per host
+ loop: "{{ groups_merged_list }}"
+
+ - name: Set /etc/hosts for HA
+ register: register_etc_hosts_file_ha
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_ha.yml
+ when:
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+ args:
+ apply:
+ delegate_to: "{{ item }}"
+ run_once: true # Otherwise tasks will run twice per host
+ loop: "{{ groups_merged_list }}"
+
+ - name: Set /etc/hosts for Scale-Out
+ register: register_etc_hosts_file_scaleout
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_scaleout.yml
+ when:
+ - (groups["hana_primary"] is defined and (groups["hana_primary"] | length>0)) and (sap_hana_scaleout_active_coordinator is defined or sap_hana_scaleout_active_worker is defined or sap_hana_scaleout_standby is defined)
+ args:
+ apply:
+ delegate_to: "{{ item }}"
+ run_once: true # Otherwise tasks will run twice per host
+ loop: "{{ groups_merged_list }}"
+
+ - name: Set vars for sap_storage_setup Ansible Role
+ when: sap_vm_provision_terraform_state == "present"
+ register: register_ansible_vars_storage
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars_storage.yml
diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_powervs/tf_template/tf_template.tf b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_powervs/tf_template/tf_template.tf
new file mode 100644
index 0000000..9d77d69
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_powervs/tf_template/tf_template.tf
@@ -0,0 +1,319 @@
+# Terraform declaration
+
+terraform {
+ required_version = ">= 1.0, <= 1.5.7"
+ required_providers {
+ ibm = {
+ #source = "localdomain/provider/ibm" // Local, on macOS path to place files would be $HOME/.terraform.d/plugins/localdomain/provider/ibm/1.xx.xx/darwin_amd6
+ source = "IBM-Cloud/ibm" // Terraform Registry
+ version = ">=1.45.0"
+ }
+ }
+}
+
+
+# Terraform Provider declaration
+
+provider "ibm" {
+
+ alias = "standard"
+
+ # Define Provider inputs manually
+ # ibmcloud_api_key = "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+
+ # Define Provider inputs from given Terraform Variables
+ ibmcloud_api_key = var.ibmcloud_api_key
+
+ # If using IBM Cloud Automation Manager, the Provider declaration values are populated automatically
+ # from the Cloud Connection credentials (by using Environment Variables)
+
+ # If using IBM Cloud Schematics, the Provider declaration values are populated automatically
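+  # e.g. the API key may instead be supplied via an environment variable such as IC_API_KEY (illustrative; this template passes it explicitly above)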
+
+ region = local.ibmcloud_region
+
+ zone = lower(var.ibmcloud_powervs_location) // Required for IBM Power VS only
+
+}
+
+
+provider "ibm" {
+
+ alias = "powervs_secure"
+
+ ibmcloud_api_key = var.ibmcloud_api_key
+
+ region = local.ibmcloud_powervs_region
+
+ zone = lower(var.ibmcloud_powervs_location) // Required for IBM Power VS only
+
+}
+
+
+module "run_account_init_module" {
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//ibmcloud_vs/account_init?ref=main"
+
+ providers = { ibm = ibm.standard }
+
+ module_var_resource_group_name = local.resource_group_create_boolean ? 0 : var.ibmcloud_resource_group
+ module_var_resource_group_create_boolean = local.resource_group_create_boolean
+
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+
+ module_var_ibmcloud_vpc_subnet_name = local.ibmcloud_vpc_subnet_create_boolean ? 0 : var.ibmcloud_vpc_subnet_name
+ module_var_ibmcloud_vpc_subnet_create_boolean = local.ibmcloud_vpc_subnet_create_boolean
+ module_var_ibmcloud_vpc_availability_zone = var.map_ibm_powervs_to_vpc_az[lower(var.ibmcloud_powervs_location)]
+
+}
+
+
+module "run_account_bootstrap_module" {
+
+ depends_on = [
+ module.run_account_init_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//ibmcloud_vs/account_bootstrap?ref=main"
+
+ providers = { ibm = ibm.standard }
+
+ module_var_resource_group_id = module.run_account_init_module.output_resource_group_id
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+
+ module_var_ibmcloud_vpc_subnet_name = local.ibmcloud_vpc_subnet_create_boolean ? module.run_account_init_module.output_vpc_subnet_name : var.ibmcloud_vpc_subnet_name
+ module_var_ibmcloud_vpc_availability_zone = var.map_ibm_powervs_to_vpc_az[lower(var.ibmcloud_powervs_location)]
+
+ module_var_dns_root_domain_name = var.sap_vm_provision_dns_root_domain
+
+}
+
+
+#module "run_account_iam_module" {
+#
+# depends_on = [
+# module.run_account_bootstrap_module
+# ]
+#
+# count = var.ibmcloud_iam_yesno == "yes" ? 1 : 0
+#
+# source = "github.com/sap-linuxlab/terraform.modules_for_sap//ibmcloud_vs/account_iam?ref=main"
+#
+# module_var_resource_group_id = module.run_account_init_module.output_resource_group_id
+# module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+#
+#}
+
+
+module "run_bastion_inject_module" {
+
+ depends_on = [
+ module.run_account_init_module,
+ module.run_account_bootstrap_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//ibmcloud_vs/bastion_inject?ref=main"
+
+ providers = { ibm = ibm.standard }
+
+ module_var_resource_group_id = module.run_account_init_module.output_resource_group_id
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+ module_var_resource_tags = var.resource_tags
+
+ module_var_ibmcloud_vpc_subnet_name = local.ibmcloud_vpc_subnet_create_boolean ? module.run_account_init_module.output_vpc_subnet_name : var.ibmcloud_vpc_subnet_name
+
+ module_var_bastion_user = var.sap_vm_provision_bastion_user
+ module_var_bastion_ssh_port = var.sap_vm_provision_bastion_ssh_port
+ module_var_bastion_ssh_key_id = module.run_account_bootstrap_module.output_bastion_ssh_key_id
+ module_var_bastion_public_ssh_key = module.run_account_bootstrap_module.output_bastion_public_ssh_key
+ module_var_bastion_private_ssh_key = module.run_account_bootstrap_module.output_bastion_private_ssh_key
+
+ module_var_bastion_os_image = var.map_os_image_regex_bastion[var.sap_vm_provision_bastion_os_image]
+
+}
+
+
+module "run_host_network_access_sap_public_via_proxy_module" {
+
+ depends_on = [
+ module.run_account_init_module,
+ module.run_account_bootstrap_module,
+ module.run_bastion_inject_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//ibmcloud_vs/host_network_access_sap_public_via_proxy?ref=main"
+
+ providers = { ibm = ibm.standard }
+
+ module_var_ibmcloud_vpc_subnet_name = local.ibmcloud_vpc_subnet_create_boolean ? module.run_account_init_module.output_vpc_subnet_name : var.ibmcloud_vpc_subnet_name
+
+ module_var_bastion_security_group_id = module.run_bastion_inject_module.output_bastion_security_group_id
+ module_var_bastion_connection_security_group_id = module.run_bastion_inject_module.output_bastion_connection_security_group_id
+ module_var_host_security_group_id = module.run_account_bootstrap_module.output_host_security_group_id
+
+ module_var_sap_hana_instance_no = var.sap_hana_install_instance_nr
+
+}
+
+
+module "run_account_bootstrap_powervs_workspace_module" {
+
+ depends_on = [
+ module.run_account_bootstrap_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//ibmcloud_powervs/account_bootstrap_powervs_workspace?ref=main"
+
+ providers = { ibm = ibm.standard }
+
+ module_var_resource_group_id = module.run_account_init_module.output_resource_group_id
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+ module_var_ibmcloud_power_zone = lower(var.ibmcloud_powervs_location)
+ module_var_ibmcloud_vpc_subnet_name = local.ibmcloud_vpc_subnet_create_boolean ? module.run_account_init_module.output_vpc_subnet_name : var.ibmcloud_vpc_subnet_name
+
+}
+
+
+module "run_account_bootstrap_powervs_networks_module" {
+
+ depends_on = [
+ module.run_account_bootstrap_module,
+ module.run_account_bootstrap_powervs_workspace_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//ibmcloud_powervs/account_bootstrap_powervs_networks?ref=main"
+
+ providers = { ibm = ibm.powervs_secure }
+
+ module_var_resource_group_id = module.run_account_init_module.output_resource_group_id
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+ module_var_ibmcloud_power_zone = lower(var.ibmcloud_powervs_location)
+ module_var_ibmcloud_powervs_workspace_guid = module.run_account_bootstrap_powervs_workspace_module.output_power_group_guid
+ module_var_ibmcloud_vpc_crn = module.run_account_bootstrap_powervs_workspace_module.output_power_target_vpc_crn
+
+}
+
+
+module "run_powervs_interconnect_sg_update_module" {
+
+ depends_on = [
+ module.run_bastion_inject_module,
+ module.run_account_bootstrap_powervs_networks_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//ibmcloud_vs/powervs_interconnect_sg_update?ref=main"
+
+ providers = { ibm = ibm.standard }
+
+ module_var_bastion_security_group_id = module.run_bastion_inject_module.output_bastion_security_group_id
+ module_var_host_security_group_id = module.run_account_bootstrap_module.output_host_security_group_id
+
+ module_var_power_group_network_private_subnet = module.run_account_bootstrap_powervs_networks_module.output_power_group_network_private_subnet
+
+}
+
+
+module "run_powervs_interconnect_proxy_provision_module" {
+
+ depends_on = [
+ module.run_account_init_module,
+ module.run_account_bootstrap_module,
+ module.run_bastion_inject_module,
+ module.run_powervs_interconnect_sg_update_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//ibmcloud_vs/powervs_interconnect_proxy_provision?ref=main"
+
+ providers = { ibm = ibm.standard }
+
+ # Set Terraform Module Variables using Terraform Variables at runtime
+
+ module_var_resource_group_id = module.run_account_init_module.output_resource_group_id
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+ module_var_resource_tags = var.resource_tags
+
+ module_var_ibmcloud_vpc_subnet_name = local.ibmcloud_vpc_subnet_create_boolean ? module.run_account_init_module.output_vpc_subnet_name : var.ibmcloud_vpc_subnet_name
+
+ module_var_bastion_user = var.sap_vm_provision_bastion_user
+ module_var_bastion_ssh_port = var.sap_vm_provision_bastion_ssh_port
+ module_var_bastion_public_ssh_key = module.run_account_bootstrap_module.output_bastion_public_ssh_key
+ module_var_bastion_private_ssh_key = module.run_account_bootstrap_module.output_bastion_private_ssh_key
+
+ module_var_bastion_floating_ip = module.run_bastion_inject_module.output_bastion_ip
+ module_var_bastion_connection_security_group_id = module.run_bastion_inject_module.output_bastion_connection_security_group_id
+
+ module_var_host_ssh_key_id = module.run_account_bootstrap_module.output_host_ssh_key_id
+ module_var_host_private_ssh_key = module.run_account_bootstrap_module.output_host_private_ssh_key
+ module_var_host_security_group_id = module.run_account_bootstrap_module.output_host_security_group_id
+
+ module_var_proxy_os_image = var.map_os_image_regex[var.sap_vm_provision_bastion_os_image]
+
+ module_var_dns_root_domain_name = var.sap_vm_provision_dns_root_domain
+ module_var_dns_services_instance = module.run_account_bootstrap_module.output_host_dns_services_instance
+
+ module_var_virtual_server_hostname = "${var.sap_vm_provision_resource_prefix}-powervs-proxy"
+
+ module_var_virtual_server_profile = "cx2-8x16"
+
+}
+
+
+module "run_host_provision_module" {
+
+ depends_on = [
+ module.run_account_init_module,
+ module.run_account_bootstrap_module,
+ module.run_bastion_inject_module,
+ module.run_powervs_interconnect_sg_update_module,
+ module.run_powervs_interconnect_proxy_provision_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//ibmcloud_powervs/host_provision?ref=main"
+
+ providers = { ibm = ibm.powervs_secure }
+
+ module_var_resource_group_id = module.run_account_init_module.output_resource_group_id
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+ module_var_resource_tags = var.resource_tags
+
+ module_var_ibm_power_group_guid = module.run_account_bootstrap_powervs_workspace_module.output_power_group_guid
+ module_var_power_group_networks = module.run_account_bootstrap_powervs_networks_module.output_power_group_networks
+
+ module_var_ibmcloud_vpc_subnet_name = local.ibmcloud_vpc_subnet_create_boolean ? module.run_account_init_module.output_vpc_subnet_name : var.ibmcloud_vpc_subnet_name
+
+ module_var_bastion_user = var.sap_vm_provision_bastion_user
+ module_var_bastion_ssh_port = var.sap_vm_provision_bastion_ssh_port
+ module_var_bastion_private_ssh_key = module.run_account_bootstrap_module.output_bastion_private_ssh_key
+ module_var_bastion_ip = module.run_bastion_inject_module.output_bastion_ip
+
+ module_var_host_public_ssh_key = module.run_account_bootstrap_module.output_host_public_ssh_key
+ module_var_host_private_ssh_key = module.run_account_bootstrap_module.output_host_private_ssh_key
+
+ module_var_host_os_image = var.map_os_image_regex[var.sap_vm_provision_ibmcloud_vs_host_os_image]
+
+ module_var_dns_root_domain_name = var.sap_vm_provision_dns_root_domain
+ module_var_dns_services_instance = module.run_account_bootstrap_module.output_host_dns_services_instance
+
+ module_var_dns_proxy_ip = module.run_powervs_interconnect_proxy_provision_module.output_proxy_private_ip
+
+ # Set Terraform Module Variables using for_each loop on a map Terraform Variable with nested objects
+
+ for_each = toset([
+ for key, value in var.map_host_specifications[var.sap_vm_provision_host_specification_plan] : key
+ ])
+
+ module_var_virtual_server_hostname = each.key
+
+ module_var_virtual_server_profile = var.map_host_specifications[var.sap_vm_provision_host_specification_plan][each.key].virtual_server_profile
+
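+  # Pass only disk-backed volumes: entries that define disk_size and are not swap (no swap_path set)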
+ module_var_storage_definition = [ for storage_item in var.map_host_specifications[var.sap_vm_provision_host_specification_plan][each.key]["storage_definition"] : storage_item if contains(keys(storage_item),"disk_size") && try(storage_item.swap_path,"") == "" ]
+
+ module_var_web_proxy_enable = false
+ module_var_os_vendor_enable = false
+
+ module_var_web_proxy_url = ""
+ module_var_web_proxy_exclusion = ""
+
+ module_var_os_vendor_account_user = ""
+ module_var_os_vendor_account_user_passcode = ""
+
+}
diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_powervs/tf_template/tf_template_input_vars.tf b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_powervs/tf_template/tf_template_input_vars.tf
new file mode 100644
index 0000000..e655c31
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_powervs/tf_template/tf_template_input_vars.tf
@@ -0,0 +1,347 @@
+
+# This file defines all Terraform Input Variables, with values to be provided interactively or using a vars file
+
+locals {
+
+ resource_group_create_boolean = var.ibmcloud_resource_group == "new" ? true : false
+
+ ibmcloud_vpc_subnet_create_boolean = var.ibmcloud_vpc_subnet_name == "new" ? true : false
+
+ ibmcloud_vpc_subnet_name_entry_is_ip = (
+ can(
+ regex("^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)([/][0-3][0-2]?|[/][1-2][0-9]|[/][0-9])$",
+ var.ibmcloud_vpc_subnet_name
+ )
+ ) ? true : false)
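+  # e.g. "10.243.0.0/24" evaluates to true; a subnet name such as "sap-subnet" evaluates to false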
+
+ # ibmcloud_region = replace(var.ibmcloud_vpc_availability_zone, "/-[0-9]/", "")
+ ibmcloud_region = replace(var.map_ibm_powervs_to_vpc_az[lower(var.ibmcloud_powervs_location)], "/-[0-9]/", "")
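+  # e.g. PowerVS location "lon06" maps to VPC Availability Zone "eu-gb-3", giving region "eu-gb"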
+
+ # Ensure lowercase to avoid API case-sensitive errors such as "pcloudNetworksPostForbidden Code 403 Error crn regionZone WDC06 is not supported under the current region"
+ ibmcloud_powervs_region = lower(var.map_ibm_powervs_location_to_powervs_region[lower(var.ibmcloud_powervs_location)])
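+  # e.g. "lon06" resolves to PowerVS region "lon"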
+
+}
+
+
+variable "map_ibm_powervs_to_vpc_az" {
+
+ description = "Map of IBM Power VS location to the colocated IBM Cloud VPC Infrastructure Availability Zone"
+
+ type = map(any)
+
+ default = {
+
+ dal12 = "us-south-2"
+ us-south = "us-south-3" // naming of IBM Power VS location 'us-south' was previous naming convention, would otherwise be 'DAL13'
+ us-east = "us-east-1" // naming of IBM Power VS location 'us-east' was previous naming convention, would otherwise be 'WDC04'
+ # wdc06 = "us-east-2" // No Cloud Connection available at this location
+ sao01 = "br-sao-1"
+ tor01 = "ca-tor-1"
+ eu-de-1 = "eu-de-2" // naming of IBM Power VS location 'eu-de-1' was previous naming convention, would otherwise be 'FRA04'
+ eu-de-2 = "eu-de-3" // naming of IBM Power VS location 'eu-de-2' was previous naming convention, would otherwise be 'FRA05'
+ lon04 = "eu-gb-1"
+ lon06 = "eu-gb-3"
+ syd04 = "au-syd-2"
+ syd05 = "au-syd-3"
+ tok04 = "jp-tok-2"
+ osa21 = "jp-osa-1"
+
+ }
+
+}
+
+
+# IBM Cloud Regional API Endpoint = https://<>.cloud.ibm.com/
+# IBM Power VS (on IBM Cloud) Regional API Endpoint = https://<>.power-iaas.cloud.ibm.com/
+variable "map_ibm_powervs_location_to_powervs_region" {
+
+ description = "Map of IBM Power VS location to the secured IBM Power VS Region API Endpoints"
+
+ type = map(any)
+
+ default = {
+
+ dal12 = "us-south"
+ us-south = "us-south"
+ us-east = "us-east"
+ # wdc06 = "us-east" // no Cloud Connection available at this location
+ sao01 = "sao"
+ tor01 = "tor"
+ eu-de-1 = "eu-de"
+ eu-de-2 = "eu-de"
+ lon04 = "lon"
+ lon06 = "lon"
+ syd04 = "syd"
+ syd05 = "syd"
+ tok04 = "tok"
+ osa21 = "osa"
+
+ }
+
+}
+
+
+variable "ibmcloud_api_key" {
+ description = "Enter your IBM Cloud API Key"
+}
+
+variable "resource_tags" {
+ type = list(string)
+ description = "Tags applied to each resource created"
+ default = [ "sap" ]
+}
+
+variable "sap_vm_provision_resource_prefix" {
+ description = "Prefix to resource names"
+}
+
+variable "ibmcloud_resource_group" {
+ description = "Enter existing/target Resource Group name, or enter 'new' to create a Resource Group using the defined prefix for all resources"
+}
+
+variable "ibmcloud_powervs_location" {
+ description = "Target IBM Power VS location (e.g. lon06). Each location is colocated at a IBM Cloud VPC Infrastructure Availability Zone (e.g. eu-gb-3)"
+}
+
+#variable "ibmcloud_iam_yesno" {
+# description = "Please choose 'yes' or 'no' for setup of default IBM Cloud Identity and Access Management (IAM) controls, for use by technicians to view and edit resources of SAP Systems run on IBM Cloud (NOTE: Requires admin privileges on API Key)"
+#}
+
+variable "ibmcloud_vpc_subnet_name" {
+ description = "Enter existing/target VPC Subnet name, or enter 'new' to create a VPC with a default VPC Address Prefix Range. If using an existing VPC Subnet, it must be attached to a Public Gateway (i.e. SNAT)"
+}
+
+variable "sap_vm_provision_dns_root_domain" {
+ description = "Root Domain for Private DNS used with the Virtual Server"
+}
+
+variable "sap_vm_provision_bastion_os_image" {
+ description = "Bastion OS Image. This variable uses the locals mapping with regex of OS Images, and will alter bastion provisioning."
+}
+
+variable "sap_vm_provision_bastion_user" {
+ description = "OS User to create on Bastion host to avoid pass-through root user (e.g. bastionuser)"
+}
+
+variable "sap_vm_provision_bastion_ssh_port" {
+ type = number
+ description = "Bastion host SSH Port from IANA Dynamic Ports range (49152 to 65535)"
+
+ validation {
+    condition = var.sap_vm_provision_bastion_ssh_port >= 49152 && var.sap_vm_provision_bastion_ssh_port <= 65535
+ error_message = "Bastion host SSH Port must fall within IANA Dynamic Ports range (49152 to 65535)."
+ }
+}
+
+
+variable "sap_vm_provision_host_specification_plan" {
+ description = "Host specification plans are xsmall_256gb. This variable uses the locals mapping with a nested list of host specifications, and will alter host provisioning."
+}
+
+variable "sap_vm_provision_ibmcloud_vs_host_os_image" {
+ description = "Host OS Image. This variable uses the locals mapping with regex of OS Images, and will alter host provisioning."
+}
+
+variable "sap_software_download_directory" {
+ description = "Mount point for downloads of SAP Software"
+
+ validation {
+ error_message = "Directory must start with forward slash."
+ condition = can(regex("^/", var.sap_software_download_directory))
+ }
+
+}
+
+
+
+variable "sap_hana_install_instance_nr" {
+ description = "Ansible - SAP HANA - Instance Number (e.g. 90)"
+
+ validation {
+ error_message = "Cannot use Instance Number 43 (HA port number) or 89 (Windows Remote Desktop Services)."
+ condition = !can(regex("(43|89)", var.sap_hana_install_instance_nr))
+ }
+
+}
+
+variable "sap_nwas_abap_ascs_instance_no" {
+ description = "Ansible - SAP NetWeaver AS (ABAP) - ABAP Central Services (ASCS) instance number"
+
+ validation {
+ error_message = "Cannot use Instance Number 43 (HA port number) or 89 (Windows Remote Desktop Services)."
+ condition = !can(regex("(43|89)", var.sap_nwas_abap_ascs_instance_no))
+ }
+
+}
+
+variable "sap_nwas_abap_pas_instance_no" {
+ description = "Ansible - SAP NetWeaver AS (ABAP) - Primary Application Server instance number"
+
+ validation {
+ error_message = "Cannot use Instance Number 43 (HA port number) or 89 (Windows Remote Desktop Services)."
+ condition = !can(regex("(43|89)", var.sap_nwas_abap_pas_instance_no))
+ }
+
+}
+
+
+variable "map_os_image_regex_bastion" {
+
+ description = "Map of operating systems OS Image regex, to identify latest OS Image for the OS major.minor version"
+
+ type = map(any)
+
+ default = {
+
+ rhel-7-6-sap-ha = ".*redhat.*7-6.*amd64.*hana.*"
+
+ rhel-8-1-sap-ha = ".*redhat.*8-1.*amd64.*hana.*"
+
+ rhel-8-2-sap-ha = ".*redhat.*8-2.*amd64.*hana.*"
+
+ rhel-8-4-sap-ha = ".*redhat.*8-4.*amd64.*hana.*"
+
+ rhel-7-6-sap-applications = ".*redhat.*7-6.*amd64.*applications.*"
+
+ rhel-8-1-sap-applications = ".*redhat.*8-1.*amd64.*applications.*"
+
+ rhel-8-2-sap-applications = ".*redhat.*8-2.*amd64.*applications.*"
+
+ rhel-8-4-sap-applications = ".*redhat.*8-4.*amd64.*applications.*"
+
+ rhel-8-4 = ".*redhat.*8-4.*minimal.*amd64.*"
+
+ sles-12-4-sap-ha = ".*sles.*12-4.*amd64.*hana.*"
+
+ sles-15-1-sap-ha = ".*sles.*15-1.*amd64.*hana.*"
+
+ sles-15-2-sap-ha = ".*sles.*15-2.*amd64.*hana.*"
+
+ sles-12-4-sap-applications = ".*sles.*12-4.*amd64.*applications.*"
+
+ sles-15-1-sap-applications = ".*sles.*15-1.*amd64.*applications.*"
+
+ sles-15-2-sap-applications = ".*sles.*15-2.*amd64.*applications.*"
+
+ }
+
+}
+
+
+variable "map_os_image_regex" {
+
+ description = "Map of operating systems OS Image regex, to identify latest OS Image for the OS major.minor version"
+
+ type = map(any)
+
+ default = {
+
+ rhel-8-4 = ".*RHEL.*8.*4"
+
+ rhel-8-6 = ".*RHEL.*8.*6"
+
+ rhel-9-2 = ".*RHEL.*9.*2"
+
+ sles-15-3 = ".*SLES.*15.*3"
+
+ sles-15-4 = ".*SLES.*15.*4"
+
+ rhel-8-4-sap-ha = ".*RHEL.*8.*4.*SAP$" # ensure string suffix using $
+
+ rhel-8-6-sap-ha = ".*RHEL.*8.*6.*SAP$" # ensure string suffix using $
+
+ sles-15-2-sap = ".*SLES.*15.*2.*SAP$" # ensure string suffix using $
+
+ sles-15-3-sap = ".*SLES.*15.*3.*SAP$" # ensure string suffix using $
+
+ sles-15-4-sap = ".*SLES.*15.*4.*SAP$" # ensure string suffix using $
+
+ }
+
+}
+
+variable "map_host_specifications" {
+
+ description = "Map of host specficiations for SAP HANA single node install"
+
+ type = map(any)
+
+ default = {
+
+ small_256gb = {
+
+ hana01 = { // Hostname
+ virtual_server_profile = "ush1-4x256"
+ // An IBM PowerVS host will be set to Tier 1 or Tier 3 storage type, and cannot use block storage volumes from both storage types
+ // Therefore all block storage volumes are provisioned with Tier 1 (this cannot be changed once provisioned)
+ // https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-about-virtual-server#storage-tiers
+ storage_definition = [
+ {
+ name = "hana_data"
+ mountpoint = "/hana/data"
+ disk_size = 384
+ disk_type = "tier1"
+ #disk_iops =
+ filesystem_type = "xfs"
+ #lvm_lv_name =
+ #lvm_lv_stripes =
+ #lvm_lv_stripe_size =
+ #lvm_vg_name =
+ #lvm_vg_options =
+ #lvm_vg_physical_extent_size =
+ #lvm_pv_device =
+ #lvm_pv_options =
+ #nfs_path =
+ #nfs_server =
+ #nfs_filesystem_type =
+ #nfs_mount_options =
+ },
+ {
+ name = "hana_log"
+ mountpoint = "/hana/log"
+ disk_size = 144
+ disk_type = "tier1"
+ filesystem_type = "xfs"
+ },
+ {
+ name = "hana_shared"
+ mountpoint = "/hana/shared"
+ disk_size = 256
+ disk_type = "tier1"
+ filesystem_type = "xfs"
+ },
+ {
+ name = "usr_sap"
+ mountpoint = "/usr/sap"
+ disk_size = 96
+ disk_type = "tier1"
+ filesystem_type = "xfs"
+ },
+ {
+ name = "sapmnt"
+ mountpoint = "/sapmnt"
+ disk_size = 96
+ disk_type = "tier1"
+ filesystem_type = "xfs"
+ },
+ {
+ name = "swap"
+ mountpoint = "/swap"
+ disk_size = 32
+ disk_type = "tier1"
+ filesystem_type = "swap"
+ },
+ {
+ name = "software"
+ mountpoint = "/software"
+ disk_size = 100
+ disk_type = "tier1"
+ filesystem_type = "xfs"
+ }
+ ]
+ }
+
+
+ }
+ }
+}
diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_powervs/tf_template/tf_template_outputs.tf b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_powervs/tf_template/tf_template_outputs.tf
new file mode 100644
index 0000000..269e517
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_powervs/tf_template/tf_template_outputs.tf
@@ -0,0 +1,44 @@
+
+output "sap_host_list" {
+ value = [
+ for key in module.run_host_provision_module: {
+ "output_host_name" : key.output_host_name ,
+ "output_host_ip" : key.output_host_private_ip ,
+ "output_host_os_user" : "root" ,
+ "output_ansible_inventory_group" : var.map_host_specifications[var.sap_vm_provision_host_specification_plan][key.output_host_name].sap_host_type
+# "output_ansible_inventory_group" : can(regex("^hana.*",key.output_host_name)) ? "hana_primary" : can(regex("^nw.*",key.output_host_name)) ? can(regex(".*ascs.*",key.output_host_name)) ? "nwas_ascs" : can(regex(".*pas.*",key.output_host_name)) ? "nwas_pas" : can(regex(".*aas.*",key.output_host_name)) ? "nwas_aas" : "ERROR" : "ERROR"
+ }
+ ]
+}
+
+
+output "bastion_os_user" {
+ value = var.sap_vm_provision_bastion_user
+}
+
+output "sap_vm_provision_bastion_public_ip" {
+ value = module.run_bastion_inject_module.output_bastion_ip
+}
+
+output "bastion_port" {
+ value = var.sap_vm_provision_bastion_ssh_port
+}
+
+
+##############################################################
+# Export SSH key to file on local
+##############################################################
+
+# Use path object to store key files temporarily in root of execution - https://www.terraform.io/docs/language/expressions/references.html#filesystem-and-workspace-info
+resource "local_file" "bastion_rsa" {
+ content = module.run_account_bootstrap_module.output_bastion_private_ssh_key
+ filename = "${path.root}/ssh/bastion_rsa"
+ file_permission = "0400"
+}
+
+# Use path object to store key files temporarily in root of execution - https://www.terraform.io/docs/language/expressions/references.html#filesystem-and-workspace-info
+resource "local_file" "hosts_rsa" {
+ content = module.run_account_bootstrap_module.output_host_private_ssh_key
+ filename = "${path.root}/ssh/hosts_rsa"
+ file_permission = "0400"
+}
diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_vs/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_vs/execute_main.yml
new file mode 100644
index 0000000..1789ca8
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_vs/execute_main.yml
@@ -0,0 +1,180 @@
+---
+
+- name: Ansible Task block for Terraform apply of multiple Terraform Modules
+ block:
+
+  # Do not use ansible.builtin.copy as this will cause a 'not writable' error on localhost (even if the user has permissions)
+ - name: Copy Terraform Template files to temporary directory in current Ansible Playbook directory
+ ansible.builtin.shell: |
+ mkdir -p {{ sap_vm_provision_terraform_work_dir_path }}
+ cp -r {{ role_path }}/tasks/platform_ansible_to_terraform/{{ sap_vm_provision_iac_platform }}/tf_template/* {{ sap_vm_provision_terraform_work_dir_path }}
+
+ - name: Terraform Template for SAP - IBM Cloud
+ register: terraform_template1_result
+ cloud.terraform.terraform:
+ project_path: "{{ sap_vm_provision_terraform_work_dir_path }}"
+ state: "{{ sap_vm_provision_terraform_state }}"
+ force_init: true
+ complex_vars: true
+ variables:
+ ibmcloud_api_key: "{{ sap_vm_provision_ibmcloud_api_key }}"
+ ibmcloud_resource_group: "{{ sap_vm_provision_ibmcloud_resource_group_name }}"
+ ibmcloud_vpc_availability_zone: "{{ sap_vm_provision_ibmcloud_availability_zone }}"
+ ibmcloud_vpc_subnet_name: "{{ sap_vm_provision_ibmcloud_vpc_subnet_name }}"
+ sap_vm_provision_resource_prefix: "{{ sap_vm_provision_resource_prefix }}"
+ sap_vm_provision_dns_root_domain: "{{ sap_vm_provision_dns_root_domain }}"
+ sap_vm_provision_bastion_os_image: "{{ sap_vm_provision_bastion_os_image }}"
+ sap_vm_provision_bastion_user: "{{ sap_vm_provision_bastion_user }}"
+ sap_vm_provision_bastion_ssh_port: "{{ sap_vm_provision_bastion_ssh_port }}"
+ map_os_image_regex: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_os_image_dictionary') }}"
+ map_host_specifications: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary') }}"
+ sap_vm_provision_host_specification_plan: "{{ sap_vm_provision_host_specification_plan }}"
+ sap_vm_provision_ibmcloud_vs_host_os_image: "{{ sap_vm_provision_ibmcloud_vs_host_os_image }}"
+ sap_software_download_directory: "{{ sap_software_download_directory }}"
+ sap_hana_install_instance_nr: "{{ sap_hana_install_instance_nr | default('') }}"
+ sap_nwas_abap_ascs_instance_no: "{{ sap_swpm_ascs_instance_nr | default('') }}"
+ sap_nwas_abap_pas_instance_no: "{{ sap_swpm_pas_instance_nr | default('') }}"
+
+ - name: Terraform Template output
+ ansible.builtin.debug:
+ var: terraform_template1_result
+
+
+ # - name: Execute Ansible Role cloud.terraform.inventory_from_outputs
+ # register: terraform_output_to_ansible_inventory
+ # ansible.builtin.include_role:
+ # name: cloud.terraform.inventory_from_outputs
+ # vars:
+ # project_path: "{{ sap_vm_provision_terraform_work_dir_path }}"
+ # mapping_variables:
+ # host_list: sap_host_list
+ # name: output_host_name
+ # ip: output_host_ip
+ # user: output_host_os_user
+ # group: output_ansible_inventory_group
+
+
+ - name: Read outputs from project path
+ when: sap_vm_provision_terraform_state == "present"
+ cloud.terraform.terraform_output:
+ project_path: "{{ sap_vm_provision_terraform_work_dir_path }}"
+ register: terraform_output_project_path
+
+ - name: Add hosts from terraform_output to the group defined in terraform_output
+ when: sap_vm_provision_terraform_state == "present"
+ register: terraform_add_hosts
+ ansible.builtin.add_host:
+ name: "{{ item['output_host_name'] }}"
+ groups: "{{ item['output_ansible_inventory_group'] }}"
+ ansible_host: "{{ item['output_host_ip'] }}"
+ ansible_user: "{{ item['output_host_os_user'] }}"
+ ansible_ssh_private_key_file: "{{ sap_vm_provision_terraform_work_dir_path }}/ssh/hosts_rsa"
+ ansible_ssh_common_args: -o ConnectTimeout=180 -o ControlMaster=auto -o ControlPersist=3600s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no -o ProxyCommand='ssh -W %h:%p {{ terraform_output.outputs['bastion_os_user'].value }}@{{ terraform_output.outputs['sap_vm_provision_bastion_public_ip'].value }} -p {{ terraform_output.outputs['bastion_port'].value }} -i {{ sap_vm_provision_terraform_work_dir_path }}/ssh/bastion_rsa -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
+ loop: "{{ terraform_output.outputs['sap_host_list'].value }}"
+ vars:
+ # even skipped tasks register variables, so we need to choose one explicitly
+ terraform_output: "{{ (terraform_output_project_path is defined and terraform_output_project_path is success) |
+ ternary(terraform_output_project_path, terraform_output_state_file) }}"
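+    # The ProxyCommand above tunnels every connection through the bastion first; a rough
+    # manual equivalent (placeholder values) is: ssh -J bastionuser@<bastion_ip>:<port> root@<host_ip>,
+    # where -J is the OpenSSH jump-host shorthand.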
+
+
+# Cannot override any variables from extravars input, see https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#understanding-variable-precedence
+# Ensure no default value exists for any prompted variable before execution of Ansible Playbook
+
+ - name: Set fact to hold all inventory hosts in all groups
+ ansible.builtin.set_fact:
+ groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] , [ groups['anydb_primary'] | default([]) ] , [ groups['anydb_secondary'] | default([]) ] ] | flatten | select() }}"
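+      # e.g. with only groups hana_primary ['hana01'] and nwas_ascs ['nwas01'] populated,
+      # groups_merged_list resolves to ['hana01', 'nwas01']; flatten unnests the lists and
+      # select() drops the empty defaults of the remaining groups.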
+
+ - name: Set facts for all hosts - use facts from localhost for NFS
+ when: sap_vm_provision_terraform_state == "present"
+ ansible.builtin.set_fact:
+ sap_vm_provision_nfs_mount_point: "{{ terraform_output.outputs['sap_vm_provision_nfs_mount_point'].value | default('') }}"
+ sap_vm_provision_nfs_mount_point_separate_sap_transport_dir: "{{ terraform_output.outputs['sap_vm_provision_nfs_mount_point_separate_sap_transport_dir'].value | default('') }}"
+ sap_vm_provision_nfs_mount_point_type: "{{ terraform_output.outputs['sap_vm_provision_nfs_mount_point_type'].value | default('') }}"
+ sap_vm_provision_nfs_mount_point_opts: "{{ terraform_output.outputs['sap_vm_provision_nfs_mount_point_opts'].value | default('') }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ loop: "{{ groups_merged_list }}"
+ vars:
+ # even skipped tasks register variables, so we need to choose one explicitly
+ terraform_output: "{{ (terraform_output_project_path is defined and terraform_output_project_path is success) |
+ ternary(terraform_output_project_path, terraform_output_state_file) }}"
+
+ - name: Set facts for all hosts - use facts from localhost for host specification dictionary
+ when: sap_vm_provision_terraform_state == "present"
+ ansible.builtin.set_fact:
+ host_specifications_dictionary: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary') }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ loop: "{{ groups_merged_list }}"
+ vars:
+ # even skipped tasks register variables, so we need to choose one explicitly
+ terraform_output: "{{ (terraform_output_project_path is defined and terraform_output_project_path is success) |
+ ternary(terraform_output_project_path, terraform_output_state_file) }}"
+
+ - name: Set Ansible Vars
+ register: register_set_ansible_vars
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars.yml
+ args:
+ apply:
+ delegate_to: "{{ item }}"
+ run_once: true # Otherwise tasks will run twice per host
+ loop: "{{ groups_merged_list }}"
+
+ # Required to collect the remote host's facts for further processing
+ # in the following steps
+ - name: Gather host facts
+ ansible.builtin.setup:
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ loop: "{{ groups_merged_list }}"
+
+ # Must be set to short hostname,
+ # so that command 'hostname' and 'hostname -s' return the short hostname only;
+  # otherwise SAP SWPM may fail with a doubled domain such as name.domain.com.domain.com
+ - name: Change system hostname (must be set to short name and not FQDN, as required by SAP)
+ ansible.builtin.hostname:
+ name: "{{ inventory_hostname_short }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ loop: "{{ groups_merged_list }}"
+
+ - name: Set /etc/hosts
+ register: register_etc_hosts_file
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts.yml
+ args:
+ apply:
+ delegate_to: "{{ item }}"
+ run_once: true # Otherwise tasks will run twice per host
+ loop: "{{ groups_merged_list }}"
+
+ - name: Set /etc/hosts for HA
+ register: register_etc_hosts_file_ha
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_ha.yml
+ when:
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+ args:
+ apply:
+ delegate_to: "{{ item }}"
+ run_once: true # Otherwise tasks will run twice per host
+ loop: "{{ groups_merged_list }}"
+
+ - name: Set /etc/hosts for Scale-Out
+ register: register_etc_hosts_file_scaleout
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_scaleout.yml
+ when:
+ - (groups["hana_primary"] is defined and (groups["hana_primary"] | length>0)) and (sap_hana_scaleout_active_coordinator is defined or sap_hana_scaleout_active_worker is defined or sap_hana_scaleout_standby is defined)
+ args:
+ apply:
+ delegate_to: "{{ item }}"
+ run_once: true # Otherwise tasks will run twice per host
+ loop: "{{ groups_merged_list }}"
+
+ - name: Set vars for sap_storage_setup Ansible Role
+ when: sap_vm_provision_terraform_state == "present"
+ register: register_ansible_vars_storage
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars_storage.yml
diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_vs/tf_template/tf_template.tf b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_vs/tf_template/tf_template.tf
new file mode 100644
index 0000000..719fd38
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_vs/tf_template/tf_template.tf
@@ -0,0 +1,223 @@
+# Terraform declaration
+
+terraform {
+ required_version = ">= 1.0, <= 1.5.5"
+ required_providers {
+ ibm = {
+ #source = "localdomain/provider/ibm" // Local, on macOS path to place files would be $HOME/.terraform.d/plugins/localdomain/provider/ibm/1.xx.xx/darwin_amd6
+ source = "IBM-Cloud/ibm" // Terraform Registry
+ version = ">=1.45.0"
+ }
+ }
+}
+
+
+# Terraform Provider declaration
+
+provider "ibm" {
+
+ # Define Provider inputs manually
+ # ibmcloud_api_key = "xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+
+ # Define Provider inputs from given Terraform Variables
+ ibmcloud_api_key = var.ibmcloud_api_key
+
+ # If using IBM Cloud Automation Manager, the Provider declaration values are populated automatically
+ # from the Cloud Connection credentials (by using Environment Variables)
+
+ # If using IBM Cloud Schematics, the Provider declaration values are populated automatically
+
+ region = local.ibmcloud_region
+
+}
+
+
+module "run_account_init_module" {
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//ibmcloud_vs/account_init?ref=main"
+
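+  # Note on the '? 0 :' pattern below (my reading of the convention in these templates):
+  # when the user enters 'new', the create_boolean is true and the module ignores the
+  # name input, so a throwaway 0 is passed in place of a real name.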
+ module_var_resource_group_name = local.resource_group_create_boolean ? 0 : var.ibmcloud_resource_group
+ module_var_resource_group_create_boolean = local.resource_group_create_boolean
+
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+
+ module_var_ibmcloud_vpc_subnet_name = local.ibmcloud_vpc_subnet_create_boolean ? 0 : var.ibmcloud_vpc_subnet_name
+ module_var_ibmcloud_vpc_subnet_create_boolean = local.ibmcloud_vpc_subnet_create_boolean
+ module_var_ibmcloud_vpc_availability_zone = var.ibmcloud_vpc_availability_zone
+
+}
+
+
+module "run_account_bootstrap_module" {
+
+ depends_on = [
+ module.run_account_init_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//ibmcloud_vs/account_bootstrap?ref=main"
+
+ module_var_resource_group_id = module.run_account_init_module.output_resource_group_id
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+
+ module_var_ibmcloud_vpc_subnet_name = local.ibmcloud_vpc_subnet_create_boolean ? module.run_account_init_module.output_vpc_subnet_name : var.ibmcloud_vpc_subnet_name
+ module_var_ibmcloud_vpc_availability_zone = var.ibmcloud_vpc_availability_zone
+
+ module_var_dns_root_domain_name = var.sap_vm_provision_dns_root_domain
+
+}
+
+
+#module "run_account_iam_module" {
+#
+# depends_on = [
+# module.run_account_bootstrap_module
+# ]
+#
+# count = var.ibmcloud_iam_yesno == "yes" ? 1 : 0
+#
+# source = "github.com/sap-linuxlab/terraform.modules_for_sap//ibmcloud_vs/account_iam?ref=main"
+#
+# module_var_resource_group_id = module.run_account_init_module.output_resource_group_id
+# module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+#
+#}
+
+
+module "run_bastion_inject_module" {
+
+ depends_on = [
+ module.run_account_init_module,
+ module.run_account_bootstrap_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//ibmcloud_vs/bastion_inject?ref=main"
+
+ module_var_resource_group_id = module.run_account_init_module.output_resource_group_id
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+ module_var_resource_tags = var.resource_tags
+
+ module_var_ibmcloud_vpc_subnet_name = local.ibmcloud_vpc_subnet_create_boolean ? module.run_account_init_module.output_vpc_subnet_name : var.ibmcloud_vpc_subnet_name
+
+ module_var_bastion_user = var.sap_vm_provision_bastion_user
+ module_var_bastion_ssh_port = var.sap_vm_provision_bastion_ssh_port
+ module_var_bastion_ssh_key_id = module.run_account_bootstrap_module.output_bastion_ssh_key_id
+ module_var_bastion_public_ssh_key = module.run_account_bootstrap_module.output_bastion_public_ssh_key
+ module_var_bastion_private_ssh_key = module.run_account_bootstrap_module.output_bastion_private_ssh_key
+
+ module_var_bastion_os_image = var.map_os_image_regex[var.sap_vm_provision_bastion_os_image]
+
+}
+
+
+module "run_host_network_access_sap_module" {
+
+ depends_on = [
+ module.run_account_init_module,
+ module.run_account_bootstrap_module,
+ module.run_bastion_inject_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//ibmcloud_vs/host_network_access_sap?ref=main"
+
+ module_var_ibmcloud_vpc_subnet_name = local.ibmcloud_vpc_subnet_create_boolean ? module.run_account_init_module.output_vpc_subnet_name : var.ibmcloud_vpc_subnet_name
+ module_var_host_security_group_id = module.run_account_bootstrap_module.output_host_security_group_id
+
+ module_var_sap_hana_instance_no = var.sap_hana_install_instance_nr
+
+}
+
+
+module "run_host_network_access_sap_public_via_proxy_module" {
+
+ depends_on = [
+ module.run_account_init_module,
+ module.run_account_bootstrap_module,
+ module.run_bastion_inject_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//ibmcloud_vs/host_network_access_sap_public_via_proxy?ref=main"
+
+ module_var_ibmcloud_vpc_subnet_name = local.ibmcloud_vpc_subnet_create_boolean ? module.run_account_init_module.output_vpc_subnet_name : var.ibmcloud_vpc_subnet_name
+
+ module_var_bastion_security_group_id = module.run_bastion_inject_module.output_bastion_security_group_id
+ module_var_bastion_connection_security_group_id = module.run_bastion_inject_module.output_bastion_connection_security_group_id
+ module_var_host_security_group_id = module.run_account_bootstrap_module.output_host_security_group_id
+
+ module_var_sap_hana_instance_no = var.sap_hana_install_instance_nr
+
+}
+
+
+module "run_host_nfs_module" {
+
+ depends_on = [
+ module.run_account_init_module,
+ module.run_account_bootstrap_module,
+ module.run_bastion_inject_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//ibmcloud_vs/host_nfs?ref=main"
+
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+ module_var_ibmcloud_vpc_subnet_name = local.ibmcloud_vpc_subnet_create_boolean ? module.run_account_init_module.output_vpc_subnet_name : var.ibmcloud_vpc_subnet_name
+ module_var_host_security_group_id = module.run_account_bootstrap_module.output_host_security_group_id
+
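+  # Reading of the expression below: emit 1 for each storage_definition entry that sets
+  # nfs_path (0 otherwise) across every host in the plan; a flattened sum greater than
+  # zero means at least one host expects NFS, so the boolean requests the file share.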
+ module_var_nfs_boolean_sapmnt = sum(flatten(
+ [
+ for host in var.map_host_specifications[var.sap_vm_provision_host_specification_plan] :
+ [ for storage_item in host["storage_definition"] : try(storage_item.nfs_path,"ignore") != "ignore" ? 1 : 0 ]
+ ] )) >0 ? true : false
+
+}
+
+
+module "run_host_provision_module" {
+
+ depends_on = [
+ module.run_account_init_module,
+ module.run_account_bootstrap_module,
+ module.run_bastion_inject_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//ibmcloud_vs/host_provision?ref=main"
+
+ # Set Terraform Module Variables using Terraform Variables at runtime
+
+ module_var_resource_group_id = module.run_account_init_module.output_resource_group_id
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+ module_var_resource_tags = var.resource_tags
+
+ module_var_ibmcloud_vpc_subnet_name = local.ibmcloud_vpc_subnet_create_boolean ? module.run_account_init_module.output_vpc_subnet_name : var.ibmcloud_vpc_subnet_name
+
+ module_var_bastion_user = var.sap_vm_provision_bastion_user
+ module_var_bastion_ssh_port = var.sap_vm_provision_bastion_ssh_port
+ module_var_bastion_private_ssh_key = module.run_account_bootstrap_module.output_bastion_private_ssh_key
+
+ module_var_bastion_floating_ip = module.run_bastion_inject_module.output_bastion_ip
+ module_var_bastion_connection_security_group_id = module.run_bastion_inject_module.output_bastion_connection_security_group_id
+
+ module_var_host_ssh_key_id = module.run_account_bootstrap_module.output_host_ssh_key_id
+ module_var_host_private_ssh_key = module.run_account_bootstrap_module.output_host_private_ssh_key
+ module_var_host_security_group_id = module.run_account_bootstrap_module.output_host_security_group_id
+
+ module_var_host_os_image = var.map_os_image_regex[var.sap_vm_provision_ibmcloud_vs_host_os_image]
+
+ module_var_dns_root_domain_name = var.sap_vm_provision_dns_root_domain
+ module_var_dns_services_instance = module.run_account_bootstrap_module.output_host_dns_services_instance
+
+
+ # Set Terraform Module Variables using for_each loop on a map Terraform Variable with nested objects
+
+ for_each = toset([
+ for key, value in var.map_host_specifications[var.sap_vm_provision_host_specification_plan] : key
+ ])
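+  # i.e. one host_provision module instance per hostname key in the selected plan; a plan
+  # defining hosts hana01 and nwas01 (illustrative names) would yield two instances.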
+
+ module_var_virtual_server_hostname = each.key
+
+ module_var_virtual_server_profile = var.map_host_specifications[var.sap_vm_provision_host_specification_plan][each.key].virtual_machine_profile
+
+ module_var_disable_ip_anti_spoofing = var.map_host_specifications[var.sap_vm_provision_host_specification_plan][each.key].disable_ip_anti_spoofing
+
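+  # The filter below keeps only entries that define disk_size and do not set swap_path,
+  # i.e. block volumes to attach; swap-file style entries carry no disk and are assumed
+  # to be handled later at the OS layer.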
+ module_var_storage_definition = [ for storage_item in var.map_host_specifications[var.sap_vm_provision_host_specification_plan][each.key]["storage_definition"] : storage_item if contains(keys(storage_item),"disk_size") && try(storage_item.swap_path,"") == "" ]
+
+}
diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_vs/tf_template/tf_template_input_vars.tf b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_vs/tf_template/tf_template_input_vars.tf
new file mode 100644
index 0000000..e5c62b7
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_vs/tf_template/tf_template_input_vars.tf
@@ -0,0 +1,258 @@
+
+# This file defines all Terraform Input Variables, with values to be provided interactively or using a vars file
+
+locals {
+
+ resource_group_create_boolean = var.ibmcloud_resource_group == "new" ? true : false
+
+ ibmcloud_vpc_subnet_create_boolean = var.ibmcloud_vpc_subnet_name == "new" ? true : false
+
+ ibmcloud_vpc_subnet_name_entry_is_ip = (
+ can(
+ regex("^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)([/][0-3][0-2]?|[/][1-2][0-9]|[/][0-9])$",
+ var.ibmcloud_vpc_subnet_name
+ )
+ ) ? true : false)
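+  # e.g. "192.168.1.0/24" sets this to true; "new" or an existing subnet name leaves it false.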
+
+ ibmcloud_region = replace(var.ibmcloud_vpc_availability_zone, "/-[^-]*$/", "")
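+  # e.g. availability zone "us-south-1" yields region "us-south".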
+
+  # Directories start with "C:..." on Windows; all other OSes use "/" for root.
+ detect_windows = substr(pathexpand("~"), 0, 1) == "/" ? false : true
+ detect_shell = substr(pathexpand("~"), 0, 1) == "/" ? true : false
+
+ # Used for displaying Shell ssh connection output
+  # /proc/version contains the WSL substring; if detected, the host is running Windows Subsystem for Linux
+ not_wsl = fileexists("/proc/version") ? length(regexall("WSL", file("/proc/version"))) > 0 ? false : true : true
+
+ # Used for displaying Windows PowerShell ssh connection output
+  # /proc/version contains the WSL substring; if detected, the host is running Windows Subsystem for Linux
+ is_wsl = fileexists("/proc/version") ? length(regexall("WSL", file("/proc/version"))) > 0 ? true : false : false
+
+}
+
+
+variable "ibmcloud_api_key" {
+ description = "Enter your IBM Cloud API Key"
+}
+
+variable "resource_tags" {
+ type = list(string)
+ description = "Tags applied to each resource created"
+ default = [ "sap" ]
+}
+
+variable "sap_vm_provision_resource_prefix" {
+ description = "Prefix to resource names"
+}
+
+variable "ibmcloud_resource_group" {
+ description = "Enter existing/target Resource Group name, or enter 'new' to create a Resource Group using the defined prefix for all resources"
+}
+
+variable "ibmcloud_vpc_availability_zone" {
+ description = "Target IBM Cloud Availability Zone (e.g. us-south-1). The IBM Cloud Region will be calculated from this value"
+
+ validation {
+ error_message = "Please enter an IBM Cloud Availability Zone (e.g. us-south-1)."
+ condition = can(regex("^([a-zA-Z0-9]*-[a-zA-Z0-9]*){2}$", var.ibmcloud_vpc_availability_zone))
+ }
+
+}
+
+#variable "ibmcloud_iam_yesno" {
+# description = "Please choose 'yes' or 'no' for setup of default IBM Cloud Identity and Access Management (IAM) controls, for use by technicians to view and edit resources of SAP Systems run on IBM Cloud (NOTE: Requires admin privileges on API Key)"
+#}
+
+variable "ibmcloud_vpc_subnet_name" {
+ description = "Enter existing/target VPC Subnet name, or enter 'new' to create a VPC with a default VPC Address Prefix Range. If using an existing VPC Subnet, it must be attached to a Public Gateway (i.e. SNAT)"
+}
+
+variable "sap_vm_provision_dns_root_domain" {
+ description = "Root Domain for Private DNS used with the Virtual Server"
+}
+
+variable "sap_vm_provision_bastion_os_image" {
+ description = "Bastion OS Image. This variable uses the locals mapping with regex of OS Images, and will alter bastion provisioning."
+}
+
+variable "sap_vm_provision_bastion_user" {
+ description = "OS User to create on Bastion host to avoid pass-through root user (e.g. bastionuser)"
+}
+
+variable "sap_vm_provision_bastion_ssh_port" {
+ type = number
+ description = "Bastion host SSH Port from IANA Dynamic Ports range (49152 to 65535)"
+
+ validation {
+    condition = var.sap_vm_provision_bastion_ssh_port >= 49152 && var.sap_vm_provision_bastion_ssh_port <= 65535
+ error_message = "Bastion host SSH Port must fall within IANA Dynamic Ports range (49152 to 65535)."
+ }
+}
+
+
+
+variable "map_os_image_regex" {
+
+ description = "Map of operating systems OS Image regex, to identify latest OS Image for the OS major.minor version"
+
+ type = map(any)
+
+ default = {
+
+ rhel-7-6-sap-ha = ".*redhat.*7-6.*amd64.*hana.*"
+
+ rhel-8-1-sap-ha = ".*redhat.*8-1.*amd64.*hana.*"
+
+ rhel-8-2-sap-ha = ".*redhat.*8-2.*amd64.*hana.*"
+
+ rhel-8-4-sap-ha = ".*redhat.*8-4.*amd64.*hana.*"
+
+ rhel-7-6-sap-applications = ".*redhat.*7-6.*amd64.*applications.*"
+
+ rhel-8-1-sap-applications = ".*redhat.*8-1.*amd64.*applications.*"
+
+ rhel-8-2-sap-applications = ".*redhat.*8-2.*amd64.*applications.*"
+
+ rhel-8-4-sap-applications = ".*redhat.*8-4.*amd64.*applications.*"
+
+ rhel-8-4 = ".*redhat.*8-4.*minimal.*amd64.*"
+
+ sles-12-4-sap-ha = ".*sles.*12-4.*amd64.*hana.*"
+
+ sles-15-1-sap-ha = ".*sles.*15-1.*amd64.*hana.*"
+
+ sles-15-2-sap-ha = ".*sles.*15-2.*amd64.*hana.*"
+
+ sles-12-4-sap-applications = ".*sles.*12-4.*amd64.*applications.*"
+
+ sles-15-1-sap-applications = ".*sles.*15-1.*amd64.*applications.*"
+
+ sles-15-2-sap-applications = ".*sles.*15-2.*amd64.*applications.*"
+
+ }
+
+}
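+
+# Illustrative lookup (assumed usage): map_os_image_regex["rhel-8-4-sap-ha"] returns
+# ".*redhat.*8-4.*amd64.*hana.*", which the provisioning modules are expected to match
+# against the catalogue of available OS images to select the latest build.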
+
+variable "sap_vm_provision_host_specification_plan" {
+ description = "Host specification plans are xsmall_256gb. This variable uses the locals mapping with a nested list of host specifications, and will alter host provisioning."
+}
+
+variable "sap_vm_provision_ibmcloud_vs_host_os_image" {
+ description = "Host OS Image. This variable uses the locals mapping with regex of OS Images, and will alter host provisioning."
+}
+
+variable "sap_software_download_directory" {
+ description = "Mount point for downloads of SAP Software"
+
+ validation {
+ error_message = "Directory must start with forward slash."
+ condition = can(regex("^/", var.sap_software_download_directory))
+ }
+
+}
+
+
+
+variable "sap_hana_install_instance_nr" {
+ description = "Ansible - SAP HANA - Instance Number (e.g. 90)"
+
+ validation {
+ error_message = "Cannot use Instance Number 43 (HA port number) or 89 (Windows Remote Desktop Services)."
+ condition = !can(regex("(43|89)", var.sap_hana_install_instance_nr))
+ }
+
+}
+
+variable "sap_nwas_abap_ascs_instance_no" {
+ description = "Ansible - SAP NetWeaver AS (ABAP) - ABAP Central Services (ASCS) instance number"
+
+ validation {
+ error_message = "Cannot use Instance Number 43 (HA port number) or 89 (Windows Remote Desktop Services)."
+ condition = !can(regex("(43|89)", var.sap_nwas_abap_ascs_instance_no))
+ }
+
+}
+
+variable "sap_nwas_abap_pas_instance_no" {
+ description = "Ansible - SAP NetWeaver AS (ABAP) - Primary Application Server instance number"
+
+ validation {
+ error_message = "Cannot use Instance Number 43 (HA port number) or 89 (Windows Remote Desktop Services)."
+ condition = !can(regex("(43|89)", var.sap_nwas_abap_pas_instance_no))
+ }
+
+}
+
+
+variable "map_host_specifications" {
+ description = "Map of host specficiations for SAP HANA single node install"
+ type = map(any)
+
+
+ default = {
+
+ xsmall_256gb = {
+
+ hana-p = { // Hostname
+
+ sap_host_type = "hana_primary" # hana_primary, nwas_ascs, nwas_pas, nwas_aas
+ virtual_server_profile = "mx2-32x256"
+ disable_ip_anti_spoofing = false
+
+ storage_definition = [
+
+ {
+ name = "hana_data"
+ mountpoint = "/hana/data"
+ #disk_count = 1
+ disk_size = 384
+ #disk_type = gp3
+ #disk_iops =
+ filesystem_type = "xfs"
+ #lvm_lv_name =
+ #lvm_lv_stripes =
+ #lvm_lv_stripe_size =
+ #lvm_vg_name =
+ #lvm_vg_options =
+ #lvm_vg_physical_extent_size =
+ #lvm_pv_device =
+ #lvm_pv_options =
+ #nfs_path =
+ #nfs_server =
+ #nfs_filesystem_type =
+ #nfs_mount_options =
+ },
+ {
+ name = "hana_log"
+ mountpoint = "/hana/log"
+ disk_size = 384
+ filesystem_type = "xfs"
+ },
+ {
+ name = "hana_shared"
+ mountpoint = "/hana/shared"
+ disk_size = 384
+ filesystem_type = "xfs"
+ },
+ {
+ name = "swap"
+ mountpoint = "/swapfile"
+ disk_size = 2
+ filesystem_type = "swap"
+ },
+ {
+ name = "software"
+ mountpoint = "/software"
+ disk_size = 100
+ filesystem_type = "xfs"
+ }
+
+ ]
+
+ }
+
+ }
+
+ }
+
+}
diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_vs/tf_template/tf_template_outputs.tf b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_vs/tf_template/tf_template_outputs.tf
new file mode 100644
index 0000000..084fccd
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmcloud_vs/tf_template/tf_template_outputs.tf
@@ -0,0 +1,61 @@
+
+output "sap_host_list" {
+ value = [
+ for key in module.run_host_provision_module: {
+ "output_host_name" : key.output_host_name ,
+ "output_host_ip" : key.output_host_private_ip ,
+ "output_host_os_user" : "root" ,
+ "output_ansible_inventory_group" : var.map_host_specifications[var.sap_vm_provision_host_specification_plan][key.output_host_name].sap_host_type
+# "output_ansible_inventory_group" : can(regex("^hana.*",key.output_host_name)) ? "hana_primary" : can(regex("^nw.*",key.output_host_name)) ? can(regex(".*ascs.*",key.output_host_name)) ? "nwas_ascs" : can(regex(".*pas.*",key.output_host_name)) ? "nwas_pas" : can(regex(".*aas.*",key.output_host_name)) ? "nwas_aas" : "ERROR" : "ERROR"
+ }
+ ]
+}
+
+
+output "bastion_os_user" {
+ value = var.sap_vm_provision_bastion_user
+}
+
+output "sap_vm_provision_bastion_public_ip" {
+ value = module.run_bastion_inject_module.output_bastion_ip
+}
+
+output "bastion_port" {
+ value = var.sap_vm_provision_bastion_ssh_port
+}
+
+
+output "sap_vm_provision_nfs_mount_point" {
+ value = try("${module.run_host_nfs_module.output_nfs_fqdn}:/", "")
+}
+
+output "sap_vm_provision_nfs_mount_point_separate_sap_transport_dir" {
+ value = try("${module.run_host_nfs_module.output_nfs_fqdn}:/", "")
+}
+
+output "sap_vm_provision_nfs_mount_point_type" {
+ value = "nfs4"
+}
+
+output "sap_vm_provision_nfs_mount_point_opts" {
+ value = "nfsvers=4.1,sec=sys,_netdev,hard,timeo=600,retrans=2,noresvport,acl"
+}
+
+
+##############################################################
+# Export SSH key to file on local
+##############################################################
+
+# Use path object to store key files temporarily in root of execution - https://www.terraform.io/docs/language/expressions/references.html#filesystem-and-workspace-info
+resource "local_file" "bastion_rsa" {
+ content = module.run_account_bootstrap_module.output_bastion_private_ssh_key
+ filename = "${path.root}/ssh/bastion_rsa"
+ file_permission = "0400"
+}
+
+# Use path object to store key files temporarily in root of execution - https://www.terraform.io/docs/language/expressions/references.html#filesystem-and-workspace-info
+resource "local_file" "hosts_rsa" {
+ content = module.run_account_bootstrap_module.output_host_private_ssh_key
+ filename = "${path.root}/ssh/hosts_rsa"
+ file_permission = "0400"
+}
diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmpowervm_vm/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmpowervm_vm/execute_main.yml
new file mode 100644
index 0000000..5f0a5db
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmpowervm_vm/execute_main.yml
@@ -0,0 +1,182 @@
+---
+
+- name: Ansible to Terraform notification
+ ansible.builtin.debug:
+ msg: "Ansible to Terraform requires re-work for this Infrastructure Platform"
+
+
+# Requires re-work: the OpenStack Terraform Provider has changed since the version lock, causing differences in provisioning / required variables
+
+- name: Ansible Task block for Terraform apply of multiple Terraform Modules
+ when: (rework_test | default(false))
+ block:
+
+  # Do not use ansible.builtin.copy as this will cause a 'not writable' error on localhost (even if the user has permissions)
+ - name: Copy Terraform Template files to temporary directory in current Ansible Playbook directory
+ ansible.builtin.shell: |
+ mkdir -p {{ sap_vm_provision_terraform_work_dir_path }}
+ cp -r {{ role_path }}/tasks/platform_ansible_to_terraform/{{ sap_vm_provision_iac_platform }}/tf_template/* {{ sap_vm_provision_terraform_work_dir_path }}
+
+ - name: Terraform Template for SAP - IBM PowerVM
+ register: terraform_template1_result
+ cloud.terraform.terraform:
+ project_path: "{{ sap_vm_provision_terraform_work_dir_path }}"
+ state: "{{ sap_vm_provision_terraform_state }}"
+ force_init: true
+ complex_vars: true
+ variables:
+ ibmpowervc_auth_endpoint: "{{ sap_vm_provision_ibmpowervm_vc_auth_endpoint }}"
+ ibmpowervc_user: "{{ sap_vm_provision_ibmpowervm_vc_user }}"
+ ibmpowervc_user_password: "{{ sap_vm_provision_ibmpowervm_vc_user_password }}"
+
+ ibmpowervc_project_name: "{{ sap_vm_provision_ibmpowervm_vc_project_name }}"
+ ibmpowervc_host_group_name: "{{ sap_vm_provision_ibmpowervm_host_group_name }}"
+ # sap_vm_provision_ibmpowervm_host_group_shared_procesor_pool_name: "{{ }}"
+ ibmpowervc_network_name: "{{ sap_vm_provision_ibmpowervm_network_name }}"
+ # sap_vm_provision_ibmpowervm_network_vnic_type: "{{ }}"
+ # sap_vm_provision_ibmpowervm_storage_template_name:"{{ }}"
+ ibmpowervc_os_image_name: "{{ sap_vm_provision_ibmpowervm_vm_host_os_image }}"
+ # ibmpowervc_storage_storwize_hostname_short: "{{ }}"
+ # ibmpowervc_storage_storwize_storage_pool_flash: "{{ }}"
+ # ibmpowervc_storage_storwize_storage_pool: "{{ }}"
+ ibmpowervc_template_compute_name: "{{ inventory_hostname }}-compute-template"
+
+ sap_vm_provision_resource_prefix: "{{ sap_vm_provision_resource_prefix }}"
+ sap_vm_provision_dns_root_domain: "{{ sap_vm_provision_dns_root_domain }}"
+ map_os_image_regex: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_os_image_dictionary') }}"
+ map_host_specifications: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary') }}"
+ sap_vm_provision_host_specification_plan: "{{ sap_vm_provision_host_specification_plan }}"
+
+ sap_software_download_directory: "{{ sap_software_download_directory }}"
+ sap_hana_install_instance_nr: "{{ sap_hana_install_instance_nr | default('') }}"
+ sap_nwas_abap_ascs_instance_no: "{{ sap_swpm_ascs_instance_nr | default('') }}"
+ sap_nwas_abap_pas_instance_no: "{{ sap_swpm_pas_instance_nr | default('') }}"
+
+ - name: Terraform Template output
+ ansible.builtin.debug:
+ var: terraform_template1_result
+
+
+ # - name: Execute Ansible Role cloud.terraform.inventory_from_outputs
+ # register: terraform_output_to_ansible_inventory
+ # ansible.builtin.include_role:
+ # name: cloud.terraform.inventory_from_outputs
+ # vars:
+ # project_path: "{{ sap_vm_provision_terraform_work_dir_path }}"
+ # mapping_variables:
+ # host_list: sap_host_list
+ # name: output_host_name
+ # ip: output_host_ip
+ # user: output_host_os_user
+ # group: output_ansible_inventory_group
+
+
+ - name: Read outputs from project path
+ when: sap_vm_provision_terraform_state == "present"
+ cloud.terraform.terraform_output:
+ project_path: "{{ sap_vm_provision_terraform_work_dir_path }}"
+ register: terraform_output_project_path
+
+ - name: Add hosts from terraform_output to the group defined in terraform_output
+ when: sap_vm_provision_terraform_state == "present"
+ register: terraform_add_hosts
+ ansible.builtin.add_host:
+ name: "{{ item['output_host_name'] }}"
+ groups: "{{ item['output_ansible_inventory_group'] }}"
+ ansible_host: "{{ item['output_host_ip'] }}"
+ ansible_user: "{{ item['output_host_os_user'] }}"
+ ansible_ssh_private_key_file: "{{ sap_vm_provision_terraform_work_dir_path }}/ssh/hosts_rsa"
+ ansible_ssh_common_args: -o ConnectTimeout=180 -o ControlMaster=auto -o ControlPersist=3600s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no -o ProxyCommand='ssh -W %h:%p {{ terraform_output.outputs['bastion_os_user'].value }}@{{ terraform_output.outputs['sap_vm_provision_bastion_public_ip'].value }} -p {{ terraform_output.outputs['bastion_port'].value }} -i {{ sap_vm_provision_terraform_work_dir_path }}/ssh/bastion_rsa -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
+ loop: "{{ terraform_output.outputs['sap_host_list'].value }}"
+ vars:
+ # even skipped tasks register variables, so we need to choose one explicitly
+ terraform_output: "{{ (terraform_output_project_path is defined and terraform_output_project_path is success) |
+ ternary(terraform_output_project_path, terraform_output_state_file) }}"
+
+
+# Cannot override any variables from extravars input, see https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#understanding-variable-precedence
+# Ensure no default value exists for any prompted variable before execution of Ansible Playbook
+
+ - name: Set fact to hold all inventory hosts in all groups
+ ansible.builtin.set_fact:
+ groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] , [ groups['anydb_primary'] | default([]) ] , [ groups['anydb_secondary'] | default([]) ] ] | flatten | select() }}"
+
+ - name: Set facts for all hosts - use facts from localhost for host specification dictionary
+ when: sap_vm_provision_terraform_state == "present"
+ ansible.builtin.set_fact:
+ host_specifications_dictionary: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary') }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ loop: "{{ groups_merged_list }}"
+ vars:
+ # even skipped tasks register variables, so we need to choose one explicitly
+ terraform_output: "{{ (terraform_output_project_path is defined and terraform_output_project_path is success) |
+ ternary(terraform_output_project_path, terraform_output_state_file) }}"
+
+ - name: Set Ansible Vars
+ register: register_set_ansible_vars
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars.yml
+ args:
+ apply:
+ delegate_to: "{{ item }}"
+ run_once: true # Otherwise tasks will run twice per host
+ loop: "{{ groups_merged_list }}"
+
+ # Required to collect the remote host's facts for further processing
+ # in the following steps
+ - name: Gather host facts
+ ansible.builtin.setup:
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ loop: "{{ groups_merged_list }}"
+
+ # Must be set to short hostname,
+ # so that command 'hostname' and 'hostname -s' return the short hostname only;
+  # otherwise SAP SWPM may fail with a doubled domain such as name.domain.com.domain.com
+ - name: Change system hostname (must be set to short name and not FQDN, as required by SAP)
+ ansible.builtin.hostname:
+ name: "{{ inventory_hostname_short }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ loop: "{{ groups_merged_list }}"
+
+ - name: Set /etc/hosts
+ register: register_etc_hosts_file
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts.yml
+ args:
+ apply:
+ delegate_to: "{{ item }}"
+ run_once: true # Otherwise tasks will run twice per host
+ loop: "{{ groups_merged_list }}"
+
+ - name: Set /etc/hosts for HA
+ register: register_etc_hosts_file_ha
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_ha.yml
+ when:
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+ args:
+ apply:
+ delegate_to: "{{ item }}"
+ run_once: true # Otherwise tasks will run twice per host
+ loop: "{{ groups_merged_list }}"
+
+ - name: Set /etc/hosts for Scale-Out
+ register: register_etc_hosts_file_scaleout
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_scaleout.yml
+ when:
+ - (groups["hana_primary"] is defined and (groups["hana_primary"] | length>0)) and (sap_hana_scaleout_active_coordinator is defined or sap_hana_scaleout_active_worker is defined or sap_hana_scaleout_standby is defined)
+ args:
+ apply:
+ delegate_to: "{{ item }}"
+ run_once: true # Otherwise tasks will run twice per host
+ loop: "{{ groups_merged_list }}"
+
+ - name: Set vars for sap_storage_setup Ansible Role
+ when: sap_vm_provision_terraform_state == "present"
+ register: register_ansible_vars_storage
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars_storage.yml
diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmpowervm_vm/tf_template/tf_template.tf b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmpowervm_vm/tf_template/tf_template.tf
new file mode 100644
index 0000000..ed27863
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmpowervm_vm/tf_template/tf_template.tf
@@ -0,0 +1,86 @@
+# Terraform declaration
+terraform {
+ required_version = ">= 1.0, <= 1.5.7"
+ required_providers {
+ openstack = {
+ #source = "localdomain/provider/openstack" // Local, on macOS path to place files would be $HOME/.terraform.d/plugins/localdomain/provider/openstack/1.xx.xx/darwin_amd6
+ source = "terraform-provider-openstack/openstack"
+ version = "1.45.0"
+ }
+ }
+}
+
+# Terraform Provider declaration
+provider "openstack" {
+
+ # Define Provider inputs from given Terraform Variables
+ auth_url = var.ibmpowervc_auth_endpoint
+ user_name = var.ibmpowervc_user
+ password = var.ibmpowervc_user_password
+
+ tenant_name = var.ibmpowervc_project_name
+ #domain_name = "Default"
+ insecure = true
+}
+
+
+module "run_host_bootstrap_module" {
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//ibmpowervc/host_bootstrap?ref=main"
+
+ # Set Terraform Module Variables using Terraform Variables at runtime
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+
+}
+
+
+module "run_host_provision_module" {
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//ibmpowervc/host_provision?ref=main"
+
+ # Set Terraform Module Variables using Terraform Variables at runtime
+
+ module_var_ibmpowervc_template_compute_name = local.ibmpowervc_template_compute_name_create_boolean ? 0 : var.ibmpowervc_template_compute_name
+ module_var_ibmpowervc_template_compute_name_create_boolean = local.ibmpowervc_template_compute_name_create_boolean
+
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+
+ module_var_host_ssh_key_name = module.run_host_bootstrap_module.output_host_ssh_key_name
+ module_var_host_public_ssh_key = module.run_host_bootstrap_module.output_host_public_ssh_key
+ module_var_host_private_ssh_key = module.run_host_bootstrap_module.output_host_private_ssh_key
+
+ module_var_ibmpowervc_host_group_name = var.ibmpowervc_host_group_name
+ module_var_ibmpowervc_network_name = var.ibmpowervc_network_name
+
+ module_var_ibmpowervc_compute_cpu_threads = var.map_host_specifications[var.sap_vm_provision_host_specification_plan][each.key].ibmpowervm_vm_cpu_threads
+ module_var_ibmpowervc_compute_ram_gb = var.map_host_specifications[var.sap_vm_provision_host_specification_plan][each.key].ibmpowervm_vm_memory_gib
+
+ module_var_ibmpowervc_os_image_name = var.ibmpowervc_os_image_name
+
+ module_var_dns_root_domain_name = var.sap_vm_provision_dns_root_domain
+
+ # Set Terraform Module Variables using for_each loop on a map Terraform Variable with nested objects
+
+ for_each = toset([
+ for key, value in var.map_host_specifications[var.sap_vm_provision_host_specification_plan] : key
+ ])
+
+ module_var_lpar_hostname = each.key
+
+ module_var_ibmpowervc_storage_storwize_hostname_short = var.ibmpowervc_storage_storwize_hostname_short
+ module_var_ibmpowervc_storage_storwize_storage_pool = var.ibmpowervc_storage_storwize_storage_pool
+ module_var_ibmpowervc_storage_storwize_storage_pool_flash = var.ibmpowervc_storage_storwize_storage_pool_flash
+
+ module_var_storage_definition = [ for storage_item in var.map_host_specifications[var.sap_vm_provision_host_specification_plan][each.key]["storage_definition"] : storage_item if contains(keys(storage_item),"disk_size") && try(storage_item.swap_path,"") == "" ]
+
+ module_var_web_proxy_enable = false
+ module_var_os_vendor_enable = false
+
+ module_var_web_proxy_url = ""
+ module_var_web_proxy_exclusion = ""
+
+ module_var_os_vendor_account_user = ""
+ module_var_os_vendor_account_user_passcode = ""
+ module_var_os_systems_mgmt_host = ""
+
+}
diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmpowervm_vm/tf_template/tf_template_input_vars.tf b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmpowervm_vm/tf_template/tf_template_input_vars.tf
new file mode 100644
index 0000000..d77ac49
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmpowervm_vm/tf_template/tf_template_input_vars.tf
@@ -0,0 +1,182 @@
+
+locals {
+ ibmpowervc_template_compute_name_create_boolean = var.ibmpowervc_template_compute_name == "new" ? true : false
+ #ibmpowervc_template_storage_name_create_boolean = var.ibmpowervc_template_storage_name == "new" ? true : false
+}
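+
+# e.g. setting ibmpowervc_template_compute_name = "new" flips the boolean above, so a
+# Compute Template is created from the CPU/RAM values in the host specification plan
+# rather than reusing an existing template (per the variable description below).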
+
+
+variable "ibmpowervc_auth_endpoint" {
+ description = "IBM PowerVC: Authentication Endpoint (e.g. https://powervc-host:5000/v3/)"
+}
+
+variable "ibmpowervc_user" {
+ description = "IBM PowerVC: Username"
+}
+
+variable "ibmpowervc_user_password" {
+ description = "IBM PowerVC: User Password"
+}
+
+variable "ibmpowervc_project_name" {
+ description = "IBM PowerVC: Project Name"
+}
+
+variable "ibmpowervc_host_group_name" {
+ description = "IBM PowerVC: Host Group Name"
+}
+
+variable "ibmpowervc_network_name" {
+ description = "IBM PowerVC: Network Name"
+}
+
+variable "ibmpowervc_template_compute_name" {
+ description = "IBM PowerVC: Enter 'new' to create a Compute Template from the CPU and RAM in the host specification plan, or use an existing/target Compute Template Name"
+}
+
+variable "ibmpowervc_storage_storwize_hostname_short" {
+ description = "IBM PowerVC - Storage with IBM Storwize: Hostname short (e.g. v7000)"
+}
+
+variable "ibmpowervc_storage_storwize_storage_pool" {
+ description = "IBM PowerVC - Storage with IBM Storwize: Storage Pool (e.g. V7000_01)"
+}
+
+variable "ibmpowervc_storage_storwize_storage_pool_flash" {
+ description = "IBM PowerVC - Storage with IBM Storwize: Storage Pool with Flash Storage (e.g. FS900_01)"
+}
+
+variable "ibmpowervc_os_image_name" {
+ description = "IBM PowerVC: OS Image Name"
+}
+
+
+variable "sap_vm_provision_resource_prefix" {
+ description = "Prefix to resource names"
+}
+
+variable "sap_vm_provision_dns_root_domain" {
+ description = "Root Domain for Private DNS used with the Virtual Server"
+}
+
+variable "sap_vm_provision_host_specification_plan" {
+ description = "Host specification plans are xsmall_256gb. This variable uses the locals mapping with a nested list of host specifications, and will alter host provisioning."
+}
+
+variable "sap_software_download_directory" {
+ description = "Mount point for downloads of SAP Software"
+
+ validation {
+ error_message = "Directory must start with forward slash."
+ condition = can(regex("^/", var.sap_software_download_directory))
+ }
+
+}
+
+
+variable "sap_hana_install_instance_nr" {
+ description = "Ansible - SAP HANA install: Instance Number (e.g. 90)"
+
+ validation {
+ error_message = "Cannot use Instance Number 43 (HA port number) or 89 (Windows Remote Desktop Services)."
+ condition = !can(regex("(43|89)", var.sap_hana_install_instance_nr))
+ }
+
+}
+
+variable "sap_nwas_abap_ascs_instance_no" {
+ description = "Ansible - SAP NetWeaver AS (ABAP) - ABAP Central Services (ASCS) instance number"
+
+ validation {
+ error_message = "Cannot use Instance Number 43 (HA port number) or 89 (Windows Remote Desktop Services)."
+ condition = !can(regex("(43|89)", var.sap_nwas_abap_ascs_instance_no))
+ }
+
+}
+
+variable "sap_nwas_abap_pas_instance_no" {
+ description = "Ansible - SAP NetWeaver AS (ABAP) - Primary Application Server instance number"
+
+ validation {
+ error_message = "Cannot use Instance Number 43 (HA port number) or 89 (Windows Remote Desktop Services)."
+ condition = !can(regex("(43|89)", var.sap_nwas_abap_pas_instance_no))
+ }
+
+}
+
+
+variable "map_host_specifications" {
+
+ description = "Map of host specficiations for SAP BW/4HANA single node install"
+
+ type = map(any)
+
+ default = {
+
+ small_256gb = {
+
+ bwh01 = { // Hostname
+ ibmpowervc_compute_cpu_threads = 32
+ ibmpowervc_compute_ram_gb = 256
+ storage_definition = [
+ {
+ name = "hana_data"
+ mountpoint = "/hana/data"
+ disk_size = 512
+ #disk_iops =
+ filesystem_type = "xfs"
+ #lvm_lv_name =
+ #lvm_lv_stripes =
+ #lvm_lv_stripe_size =
+ #lvm_vg_name =
+ #lvm_vg_options =
+ #lvm_vg_physical_extent_size =
+ #lvm_pv_device =
+ #lvm_pv_options =
+ #nfs_path =
+ #nfs_server =
+ #nfs_filesystem_type =
+ #nfs_mount_options =
+ },
+ {
+ name = "hana_log"
+ mountpoint = "/hana/log"
+ disk_size = 128
+ filesystem_type = "xfs"
+ },
+ {
+ name = "hana_shared"
+ mountpoint = "/hana/shared"
+ disk_size = 256
+ filesystem_type = "xfs"
+ },
+ {
+ name = "usr_sap"
+ mountpoint = "/usr/sap"
+ disk_size = 96
+ filesystem_type = "xfs"
+ },
+ {
+ name = "sapmnt"
+ mountpoint = "/sapmnt"
+ disk_size = 96
+ filesystem_type = "xfs"
+ },
+ {
+ name = "swap"
+ mountpoint = "/swap"
+ disk_size = 32
+ filesystem_type = "swap"
+ },
+ {
+ name = "software"
+ mountpoint = "/software"
+ disk_size = 100
+ filesystem_type = "xfs"
+ }
+ ]
+ }
+
+ }
+ }
+}
diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmpowervm_vm/tf_template/tf_template_outputs.tf b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmpowervm_vm/tf_template/tf_template_outputs.tf
new file mode 100644
index 0000000..8014e75
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ibmpowervm_vm/tf_template/tf_template_outputs.tf
@@ -0,0 +1,24 @@
+
+output "sap_host_list" {
+ value = [
+ for key in module.run_host_provision_module: {
+ "output_host_name" : key.output_host_name ,
+ "output_host_ip" : key.output_host_private_ip ,
+ "output_host_os_user" : "root" ,
+ "output_ansible_inventory_group" : var.map_host_specifications[var.sap_vm_provision_host_specification_plan][key.output_host_name].sap_host_type
+# "output_ansible_inventory_group" : can(regex("^hana.*",key.output_host_name)) ? "hana_primary" : can(regex("^nw.*",key.output_host_name)) ? can(regex(".*ascs.*",key.output_host_name)) ? "nwas_ascs" : can(regex(".*pas.*",key.output_host_name)) ? "nwas_pas" : can(regex(".*aas.*",key.output_host_name)) ? "nwas_aas" : "ERROR" : "ERROR"
+ }
+ ]
+}
+
+
+##############################################################
+# Export SSH key to file on local
+##############################################################
+
+# Use path object to store key files temporarily in root of execution - https://www.terraform.io/docs/language/expressions/references.html#filesystem-and-workspace-info
+resource "local_file" "hosts_rsa" {
+ content = module.run_host_bootstrap_module.output_host_private_ssh_key
+ filename = "${path.root}/ssh/hosts_rsa"
+ file_permission = "0400"
+}
diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/kubevirt_vm/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/kubevirt_vm/execute_main.yml
new file mode 100644
index 0000000..5ebbc22
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/kubevirt_vm/execute_main.yml
@@ -0,0 +1,5 @@
+---
+
+- name: Ansible to Terraform notification
+ ansible.builtin.debug:
+ msg: "Ansible to Terraform is not implemented for this Infrastructure Platform"
diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/msazure_vm/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/msazure_vm/execute_main.yml
new file mode 100644
index 0000000..649fdde
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/msazure_vm/execute_main.yml
@@ -0,0 +1,185 @@
+---
+
+- name: Ansible Task block for Terraform apply of multiple Terraform Modules
+ block:
+
+  # Do not use ansible.builtin.copy as this will cause a 'not writable' error on localhost (even if the user has permissions)
+ - name: Copy Terraform Template files to temporary directory in current Ansible Playbook directory
+ ansible.builtin.shell: |
+ mkdir -p {{ sap_vm_provision_terraform_work_dir_path }}
+ cp -r {{ role_path }}/tasks/platform_ansible_to_terraform/{{ sap_vm_provision_iac_platform }}/tf_template/* {{ sap_vm_provision_terraform_work_dir_path }}
+
+ - name: Terraform Template for SAP - MS Azure
+ register: terraform_template1_result
+ cloud.terraform.terraform:
+ project_path: "{{ sap_vm_provision_terraform_work_dir_path }}"
+ state: "{{ sap_vm_provision_terraform_state }}"
+ force_init: true
+ complex_vars: true
+ variables:
+ az_app_client_id: "{{ sap_vm_provision_msazure_app_client_id }}"
+ az_app_client_secret: "{{ sap_vm_provision_msazure_app_client_secret }}"
+ az_subscription_id: "{{ sap_vm_provision_msazure_subscription_id }}"
+ az_tenant_id: "{{ sap_vm_provision_msazure_tenant_id }}"
+ az_location_availability_zone_no: "{{ sap_vm_provision_msazure_location_availability_zone_no }}"
+ az_location_region: "{{ sap_vm_provision_msazure_location_region }}"
+ az_resource_group_name: "{{ sap_vm_provision_msazure_resource_group_name }}"
+ az_vnet_name: "{{ sap_vm_provision_msazure_vnet_name }}"
+ az_vnet_subnet_name: "{{ sap_vm_provision_msazure_vnet_subnet_name }}"
+ sap_vm_provision_resource_prefix: "{{ sap_vm_provision_resource_prefix }}"
+ sap_vm_provision_dns_root_domain: "{{ sap_vm_provision_dns_root_domain }}"
+ sap_vm_provision_bastion_os_image: "{{ sap_vm_provision_bastion_os_image }}"
+ sap_vm_provision_bastion_user: "{{ sap_vm_provision_bastion_user }}"
+ sap_vm_provision_bastion_ssh_port: "{{ sap_vm_provision_bastion_ssh_port }}"
+ map_os_image_regex: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_os_image_dictionary') }}"
+ map_host_specifications: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary') }}"
+ sap_vm_provision_host_specification_plan: "{{ sap_vm_provision_host_specification_plan }}"
+ sap_vm_provision_msazure_vm_host_os_image: "{{ sap_vm_provision_msazure_vm_host_os_image }}"
+ sap_software_download_directory: "{{ sap_software_download_directory }}"
+ sap_hana_install_instance_nr: "{{ sap_hana_install_instance_nr | default('') }}"
+ sap_nwas_abap_ascs_instance_no: "{{ sap_swpm_ascs_instance_nr | default('') }}"
+ sap_nwas_abap_pas_instance_no: "{{ sap_swpm_pas_instance_nr | default('') }}"
+
+ - name: Terraform Template output
+ ansible.builtin.debug:
+ var: terraform_template1_result
+
+
+ # - name: Execute Ansible Role cloud.terraform.inventory_from_outputs
+ # register: terraform_output_to_ansible_inventory
+ # ansible.builtin.include_role:
+ # name: cloud.terraform.inventory_from_outputs
+ # vars:
+ # project_path: "{{ sap_vm_provision_terraform_work_dir_path }}"
+ # mapping_variables:
+ # host_list: sap_host_list
+ # name: output_host_name
+ # ip: output_host_ip
+ # user: output_host_os_user
+ # group: output_ansible_inventory_group
+
+
+ - name: Read outputs from project path
+ when: sap_vm_provision_terraform_state == "present"
+ cloud.terraform.terraform_output:
+ project_path: "{{ sap_vm_provision_terraform_work_dir_path }}"
+ register: terraform_output_project_path
+
+ - name: Add hosts from terraform_output to the group defined in terraform_output
+ when: sap_vm_provision_terraform_state == "present"
+ register: terraform_add_hosts
+ ansible.builtin.add_host:
+ name: "{{ item['output_host_name'] }}"
+ groups: "{{ item['output_ansible_inventory_group'] }}"
+ ansible_host: "{{ item['output_host_ip'] }}"
+ ansible_user: "{{ item['output_host_os_user'] }}"
+ ansible_ssh_private_key_file: "{{ sap_vm_provision_terraform_work_dir_path }}/ssh/hosts_rsa"
+ ansible_ssh_common_args: -o ConnectTimeout=180 -o ControlMaster=auto -o ControlPersist=3600s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no -o ProxyCommand='ssh -W %h:%p {{ terraform_output.outputs['bastion_os_user'].value }}@{{ terraform_output.outputs['sap_vm_provision_bastion_public_ip'].value }} -p {{ terraform_output.outputs['bastion_port'].value }} -i {{ sap_vm_provision_terraform_work_dir_path }}/ssh/bastion_rsa -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
+ loop: "{{ terraform_output.outputs['sap_host_list'].value }}"
+ vars:
+ # even skipped tasks register variables, so we need to choose one explicitly
+ terraform_output: "{{ (terraform_output_project_path is defined and terraform_output_project_path is success) |
+ ternary(terraform_output_project_path, terraform_output_state_file) }}"
+
+
+# Cannot override any variables from extravars input, see https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#understanding-variable-precedence
+# Ensure no default value exists for any prompted variable before execution of Ansible Playbook
+
+ - name: Set fact to hold all inventory hosts in all groups
+ ansible.builtin.set_fact:
+ groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] , [ groups['anydb_primary'] | default([]) ] , [ groups['anydb_secondary'] | default([]) ] ] | flatten | select() }}"
+
+ - name: Set facts for all hosts - use facts from localhost for NFS
+ when: sap_vm_provision_terraform_state == "present"
+ ansible.builtin.set_fact:
+ sap_vm_provision_nfs_mount_point: "{{ terraform_output.outputs['sap_vm_provision_nfs_mount_point'].value | default('') }}"
+ sap_vm_provision_nfs_mount_point_separate_sap_transport_dir: "{{ terraform_output.outputs['sap_vm_provision_nfs_mount_point_separate_sap_transport_dir'].value | default('') }}"
+ sap_vm_provision_nfs_mount_point_type: "{{ terraform_output.outputs['sap_vm_provision_nfs_mount_point_type'].value | default('') }}"
+ sap_vm_provision_nfs_mount_point_opts: "{{ terraform_output.outputs['sap_vm_provision_nfs_mount_point_opts'].value | default('') }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ loop: "{{ groups_merged_list }}"
+ vars:
+ # even skipped tasks register variables, so we need to choose one explicitly
+ terraform_output: "{{ (terraform_output_project_path is defined and terraform_output_project_path is success) |
+ ternary(terraform_output_project_path, terraform_output_state_file) }}"
+
+ - name: Set facts for all hosts - use facts from localhost for host specification dictionary
+ when: sap_vm_provision_terraform_state == "present"
+ ansible.builtin.set_fact:
+ host_specifications_dictionary: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary') }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ loop: "{{ groups_merged_list }}"
+ vars:
+ # even skipped tasks register variables, so we need to choose one explicitly
+ terraform_output: "{{ (terraform_output_project_path is defined and terraform_output_project_path is success) |
+ ternary(terraform_output_project_path, terraform_output_state_file) }}"
+
+ - name: Set Ansible Vars
+ register: register_set_ansible_vars
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars.yml
+ args:
+ apply:
+ delegate_to: "{{ item }}"
+ run_once: true # Otherwise tasks will run twice per host
+ loop: "{{ groups_merged_list }}"
+
+ # Required to collect the remote host's facts for further processing
+ # in the following steps
+ - name: Gather host facts
+ ansible.builtin.setup:
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ loop: "{{ groups_merged_list }}"
+
+ # Must be set to short hostname,
+ # so that command 'hostname' and 'hostname -s' return the short hostname only;
+ # otherwise may cause error with SAP SWPM using name.domain.com.domain.com
+ - name: Change system hostname (must be set to short name and not FQDN, as required by SAP)
+ ansible.builtin.hostname:
+ name: "{{ inventory_hostname_short }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ loop: "{{ groups_merged_list }}"
+
+ - name: Set /etc/hosts
+ register: register_etc_hosts_file
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts.yml
+ args:
+ apply:
+ delegate_to: "{{ item }}"
+ run_once: true # Otherwise tasks will run twice per host
+ loop: "{{ groups_merged_list }}"
+
+ - name: Set /etc/hosts for HA
+ register: register_etc_hosts_file_ha
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_ha.yml
+ when:
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+ args:
+ apply:
+ delegate_to: "{{ item }}"
+ run_once: true # Otherwise tasks will run twice per host
+ loop: "{{ groups_merged_list }}"
+
+ - name: Set /etc/hosts for Scale-Out
+ register: register_etc_hosts_file_scaleout
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_scaleout.yml
+ when:
+ - (groups["hana_primary"] is defined and (groups["hana_primary"] | length>0)) and (sap_hana_scaleout_active_coordinator is defined or sap_hana_scaleout_active_worker is defined or sap_hana_scaleout_standby is defined)
+ args:
+ apply:
+ delegate_to: "{{ item }}"
+ run_once: true # Otherwise tasks will run twice per host
+ loop: "{{ groups_merged_list }}"
+
+ - name: Set vars for sap_storage_setup Ansible Role
+ when: sap_vm_provision_terraform_state == "present"
+ register: register_ansible_vars_storage
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars_storage.yml
diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/msazure_vm/tf_template/tf_template.tf b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/msazure_vm/tf_template/tf_template.tf
new file mode 100644
index 0000000..451c802
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/msazure_vm/tf_template/tf_template.tf
@@ -0,0 +1,263 @@
+
+# Terraform declaration
+terraform {
+ required_version = ">= 1.0, <= 1.5.5"
+ required_providers {
+ azurerm = {
+      #source = "localdomain/provider/azurerm" // Local, on macOS the path to place files would be $HOME/.terraform.d/plugins/localdomain/provider/azurerm/1.xx.xx/darwin_amd64
+ source = "hashicorp/azurerm" // Terraform Registry
+ version = ">=2.90.0"
+ }
+ azapi = {
+ source = "Azure/azapi"
+ version = ">=1.3.0"
+ }
+ }
+}
+
+# Terraform Provider declaration
+
+provider "azurerm" {
+
+ features {}
+
+ tenant_id = var.az_tenant_id // Azure Tenant ID, linked to the Azure Active Directory instance
+ subscription_id = var.az_subscription_id // Azure Subscription ID, linked to an Azure Tenant. All resource groups belong to the Azure Subscription.
+
+ client_id = var.az_app_client_id // Azure Client ID, defined in the Azure Active Directory instance; equivalent to Active Directory Application ID.
+ client_secret = var.az_app_client_secret // Azure Application ID Password, defined in the Azure Active Directory instance
+
+ # Role-based Access Control (RBAC) permissions control the actions for resources within the Azure Subscription.
+ # The Roles are assigned to a Security Principal - which can be a User, Group, Service Principal or Managed Identity.
+
+}
+
+provider "azapi" {
+ tenant_id = var.az_tenant_id // Azure Tenant ID, linked to the Azure Active Directory instance
+ subscription_id = var.az_subscription_id // Azure Subscription ID, linked to an Azure Tenant. All resource groups belong to the Azure Subscription.
+
+ client_id = var.az_app_client_id // Azure Client ID, defined in the Azure Active Directory instance; equivalent to Active Directory Application ID.
+ client_secret = var.az_app_client_secret // Azure Application ID Password, defined in the Azure Active Directory instance
+}
+
+
+module "run_account_init_module" {
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//msazure_vm/account_init?ref=main"
+
+ module_var_az_resource_group_name = local.resource_group_create_boolean ? 0 : var.az_resource_group_name
+ module_var_az_resource_group_create_boolean = local.resource_group_create_boolean
+
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+
+ module_var_az_location_region = var.az_location_region
+ module_var_az_location_availability_zone_no = var.az_location_availability_zone_no
+
+ module_var_az_vnet_name = local.az_vnet_name_create_boolean ? 0 : var.az_vnet_name
+ module_var_az_vnet_name_create_boolean = local.az_vnet_name_create_boolean
+
+ module_var_az_vnet_subnet_name = local.az_vnet_subnet_name_create_boolean ? 0 : var.az_vnet_subnet_name
+ module_var_az_vnet_subnet_name_create_boolean = local.az_vnet_subnet_name_create_boolean
+
+}
+
+
+module "run_account_bootstrap_module" {
+
+ depends_on = [
+ module.run_account_init_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//msazure_vm/account_bootstrap?ref=main"
+
+ module_var_az_resource_group_name = module.run_account_init_module.output_resource_group_name
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+
+ module_var_az_location_region = var.az_location_region
+ module_var_az_location_availability_zone_no = var.az_location_availability_zone_no
+
+ module_var_az_vnet_name = module.run_account_init_module.output_vnet_name
+ module_var_az_vnet_subnet_name = module.run_account_init_module.output_vnet_subnet_name
+
+ module_var_dns_root_domain_name = var.sap_vm_provision_dns_root_domain
+}
+
+
+#module "run_account_iam_module" {
+#
+# depends_on = [
+# module.run_account_bootstrap_module
+# ]
+#
+# count = var.az_iam_yesno == "yes" ? 1 : 0
+#
+# source = "github.com/sap-linuxlab/terraform.modules_for_sap//msazure_vm/account_iam?ref=main"
+#
+# module_var_az_resource_group_name = module.run_account_init_module.output_resource_group_name
+# module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+#
+#}
+
+
+module "run_bastion_inject_module" {
+
+ depends_on = [
+ module.run_account_init_module,
+ module.run_account_bootstrap_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//msazure_vm/bastion_inject?ref=main"
+
+ module_var_az_resource_group_name = module.run_account_init_module.output_resource_group_name
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+
+ module_var_az_location_region = var.az_location_region
+ module_var_az_location_availability_zone_no = var.az_location_availability_zone_no
+
+ module_var_az_vnet_name = module.run_account_init_module.output_vnet_name
+ module_var_az_vnet_subnet_name = module.run_account_init_module.output_vnet_subnet_name
+
+ module_var_bastion_user = var.sap_vm_provision_bastion_user
+ module_var_bastion_ssh_port = var.sap_vm_provision_bastion_ssh_port
+ module_var_bastion_ssh_key_id = module.run_account_bootstrap_module.output_bastion_ssh_key_id
+ module_var_bastion_public_ssh_key = module.run_account_bootstrap_module.output_bastion_public_ssh_key
+ module_var_bastion_private_ssh_key = module.run_account_bootstrap_module.output_bastion_private_ssh_key
+
+ module_var_bastion_os_image = var.map_os_image_regex[var.sap_vm_provision_bastion_os_image]
+
+}
+
+
+module "run_host_network_access_sap_module" {
+
+ depends_on = [
+ module.run_account_init_module,
+ module.run_account_bootstrap_module,
+ module.run_bastion_inject_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//msazure_vm/host_network_access_sap?ref=main"
+
+ module_var_az_resource_group_name = module.run_account_init_module.output_resource_group_name
+
+ module_var_az_vnet_name = module.run_account_init_module.output_vnet_name
+ module_var_az_vnet_subnet_name = module.run_account_init_module.output_vnet_subnet_name
+
+ module_var_host_security_group_name = module.run_account_bootstrap_module.output_host_security_group_name
+
+ module_var_sap_nwas_abap_ascs_instance_no = var.sap_nwas_abap_ascs_instance_no
+ module_var_sap_nwas_abap_pas_instance_no = var.sap_nwas_abap_pas_instance_no
+ module_var_sap_hana_instance_no = var.sap_hana_install_instance_nr
+
+}
+
+
+module "run_host_network_access_sap_public_via_proxy_module" {
+
+ depends_on = [
+ module.run_account_init_module,
+ module.run_account_bootstrap_module,
+ module.run_bastion_inject_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//msazure_vm/host_network_access_sap_public_via_proxy?ref=main"
+
+ module_var_az_resource_group_name = module.run_account_init_module.output_resource_group_name
+
+ module_var_az_vnet_name = module.run_account_init_module.output_vnet_name
+ module_var_az_vnet_subnet_name = module.run_account_init_module.output_vnet_subnet_name
+ module_var_az_vnet_bastion_subnet_name = module.run_bastion_inject_module.output_vnet_bastion_subnet_name
+
+ module_var_host_security_group_name = module.run_account_bootstrap_module.output_host_security_group_name
+ module_var_bastion_security_group_name = module.run_bastion_inject_module.output_bastion_security_group_name
+ module_var_bastion_connection_security_group_name = module.run_bastion_inject_module.output_bastion_connection_security_group_name
+
+ module_var_sap_nwas_abap_pas_instance_no = var.sap_nwas_abap_pas_instance_no
+ module_var_sap_hana_instance_no = var.sap_hana_install_instance_nr
+
+}
+
+
+module "run_host_nfs_module" {
+
+ depends_on = [
+ module.run_account_init_module,
+ module.run_account_bootstrap_module,
+ module.run_bastion_inject_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//msazure_vm/host_nfs?ref=main"
+
+ module_var_az_resource_group_name = module.run_account_init_module.output_resource_group_name
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+
+ module_var_az_location_region = var.az_location_region
+ module_var_az_location_availability_zone_no = var.az_location_availability_zone_no
+
+ module_var_az_vnet_name = module.run_account_init_module.output_vnet_name
+ module_var_az_vnet_subnet_name = module.run_account_init_module.output_vnet_subnet_name
+
+ module_var_host_security_group_name = module.run_account_bootstrap_module.output_host_security_group_name
+
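+  # True when at least one storage_definition entry across all hosts defines an nfs_path (i.e. an NFS filesystem is requested)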
+ module_var_nfs_boolean_sapmnt = sum(flatten(
+ [
+ for host in var.map_host_specifications[var.sap_vm_provision_host_specification_plan] :
+ [ for storage_item in host["storage_definition"] : try(storage_item.nfs_path,"ignore") != "ignore" ? 1 : 0 ]
+    ] )) > 0
+
+
+ module_var_dns_zone_name = module.run_account_bootstrap_module.output_dns_zone_name
+}
+
+
+module "run_host_provision_module" {
+
+ depends_on = [
+ module.run_account_init_module,
+ module.run_account_bootstrap_module,
+ module.run_bastion_inject_module
+ ]
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//msazure_vm/host_provision?ref=main"
+
+ module_var_az_resource_group_name = module.run_account_init_module.output_resource_group_name
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+
+ module_var_az_location_region = var.az_location_region
+ module_var_az_location_availability_zone_no = var.az_location_availability_zone_no
+
+ module_var_az_vnet_name = module.run_account_init_module.output_vnet_name
+ module_var_az_vnet_subnet_name = module.run_account_init_module.output_vnet_subnet_name
+
+ module_var_bastion_user = var.sap_vm_provision_bastion_user
+ module_var_bastion_ssh_port = var.sap_vm_provision_bastion_ssh_port
+ module_var_bastion_private_ssh_key = module.run_account_bootstrap_module.output_bastion_private_ssh_key
+ module_var_bastion_ip = module.run_bastion_inject_module.output_bastion_ip
+ module_var_bastion_connection_sg_id = module.run_bastion_inject_module.output_bastion_connection_security_group_id
+
+ module_var_host_ssh_key_id = module.run_account_bootstrap_module.output_host_ssh_key_id
+ module_var_host_ssh_public_key = module.run_account_bootstrap_module.output_host_public_ssh_key
+ module_var_host_ssh_private_key = module.run_account_bootstrap_module.output_host_private_ssh_key
+ module_var_host_sg_id = module.run_account_bootstrap_module.output_host_security_group_id
+
+ module_var_host_os_image = var.map_os_image_regex[var.sap_vm_provision_msazure_vm_host_os_image]
+
+ module_var_dns_zone_name = module.run_account_bootstrap_module.output_dns_zone_name
+ module_var_dns_root_domain_name = var.sap_vm_provision_dns_root_domain
+
+
+ # Set Terraform Module Variables using for_each loop on a map Terraform Variable with nested objects
+
+ for_each = toset([
+ for key, value in var.map_host_specifications[var.sap_vm_provision_host_specification_plan] : key
+ ])
+
+ module_var_host_name = each.key
+
+ module_var_az_vm_instance = var.map_host_specifications[var.sap_vm_provision_host_specification_plan][each.key].virtual_machine_profile
+ module_var_disable_ip_anti_spoofing = var.map_host_specifications[var.sap_vm_provision_host_specification_plan][each.key].disable_ip_anti_spoofing
+
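+  # Pass only block storage entries to the module: keep items that define disk_size and skip entries that set swap_path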
+ module_var_storage_definition = [ for storage_item in var.map_host_specifications[var.sap_vm_provision_host_specification_plan][each.key]["storage_definition"] : storage_item if contains(keys(storage_item),"disk_size") && try(storage_item.swap_path,"") == "" ]
+
+}
+
diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/msazure_vm/tf_template/tf_template_input_vars.tf b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/msazure_vm/tf_template/tf_template_input_vars.tf
new file mode 100644
index 0000000..908e802
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/msazure_vm/tf_template/tf_template_input_vars.tf
@@ -0,0 +1,267 @@
+
+locals {
+
+ resource_group_create_boolean = var.az_resource_group_name == "new" ? true : false
+
+ az_vnet_name_create_boolean = var.az_vnet_name == "new" ? true : false
+
+ az_vnet_subnet_name_create_boolean = var.az_vnet_subnet_name == "new" ? true : false
+
+  # Directories start with "C:..." on Windows; all other OSs use "/" for root.
+ detect_windows = substr(pathexpand("~"), 0, 1) == "/" ? false : true
+ detect_shell = substr(pathexpand("~"), 0, 1) == "/" ? true : false
+
+ # Used for displaying Shell ssh connection output
+  # /proc/version contains the WSL substring; if detected, the host is running Windows Subsystem for Linux
+ not_wsl = fileexists("/proc/version") ? length(regexall("WSL", file("/proc/version"))) > 0 ? false : true : true
+
+ # Used for displaying Windows PowerShell ssh connection output
+  # /proc/version contains the WSL substring; if detected, the host is running Windows Subsystem for Linux
+ is_wsl = fileexists("/proc/version") ? length(regexall("WSL", file("/proc/version"))) > 0 ? true : false : false
+
+}
+
+
+variable "az_tenant_id" {
+ description = "Azure Tenant ID"
+}
+
+variable "az_subscription_id" {
+ description = "Azure Subscription ID"
+}
+
+variable "az_app_client_id" {
+ description = "Azure AD App Client ID"
+}
+
+variable "az_app_client_secret" {
+ description = "Azure AD App Client Secret"
+}
+
+variable "sap_vm_provision_resource_prefix" {
+ description = "Enter prefix to resource names"
+}
+
+variable "az_resource_group_name" {
+ description = "Enter existing/target Azure Resource Group name, or enter 'new' to create a Resource Group using the defined prefix for all resources"
+}
+
+variable "az_location_region" {
+ description = "Target Azure Region aka. Azure Location Display Name (e.g. 'West Europe')"
+}
+
+variable "az_location_availability_zone_no" {
+ description = "Target Azure Availability Zone (e.g. 1)"
+}
+
+variable "az_vnet_name" {
+  description = "Enter existing/target Azure VNet name, or enter 'new' to create a VNet with a default Address Prefix Range (cannot be 'new' if using existing VNet Subnet)"
+}
+
+variable "az_vnet_subnet_name" {
+  description = "Enter existing/target Azure VNet Subnet name, or enter 'new' to create a Subnet with a default Address Prefix Range (if using an existing VNet, ensure the default Subnet range matches the VNet address space and does not conflict with existing Subnets)"
+}
+
+variable "sap_vm_provision_dns_root_domain" {
+ description = "Root Domain for Private DNS used with the Virtual Machine"
+}
+
+variable "sap_vm_provision_bastion_os_image" {
+  description = "Bastion OS Image. This value is matched against the map_os_image_regex variable of OS Image regex patterns, and will alter bastion provisioning."
+}
+
+variable "sap_vm_provision_bastion_user" {
+ description = "OS User to create on Bastion host to avoid pass-through root user (e.g. bastionuser)"
+}
+
+variable "sap_vm_provision_bastion_ssh_port" {
+ type = number
+ description = "Bastion host SSH Port from IANA Dynamic Ports range (49152 to 65535)"
+
+ validation {
+    condition     = var.sap_vm_provision_bastion_ssh_port >= 49152 && var.sap_vm_provision_bastion_ssh_port <= 65535
+ error_message = "Bastion host SSH Port must fall within IANA Dynamic Ports range (49152 to 65535)."
+ }
+}
+
+variable "sap_vm_provision_host_specification_plan" {
+  description = "Available host specification plans: xsmall_256gb. This value selects a nested set of host specifications from the map_host_specifications variable, and will alter host provisioning."
+}
+
+variable "sap_vm_provision_msazure_vm_host_os_image" {
+  description = "Host OS Image. This value is matched against the map_os_image_regex variable of OS Image regex patterns, and will alter host provisioning."
+}
+
+variable "sap_software_download_directory" {
+ description = "Mount point for downloads of SAP Software"
+
+ validation {
+ error_message = "Directory must start with forward slash."
+ condition = can(regex("^/", var.sap_software_download_directory))
+ }
+
+}
+
+
+variable "sap_hana_install_instance_nr" {
+ description = "Ansible - SAP HANA install: Instance Number (e.g. 90)"
+
+ validation {
+ error_message = "Cannot use Instance Number 43 (HA port number) or 89 (Windows Remote Desktop Services)."
+ condition = !can(regex("(43|89)", var.sap_hana_install_instance_nr))
+ }
+
+}
+
+
+variable "sap_nwas_abap_ascs_instance_no" {
+ description = "Ansible - SAP NetWeaver AS (ABAP) - ABAP Central Services (ASCS) instance number"
+
+ validation {
+ error_message = "Cannot use Instance Number 43 (HA port number) or 89 (Windows Remote Desktop Services)."
+ condition = !can(regex("(43|89)", var.sap_nwas_abap_ascs_instance_no))
+ }
+
+}
+
+variable "sap_nwas_abap_pas_instance_no" {
+ description = "Ansible - SAP NetWeaver AS (ABAP) - Primary Application Server instance number"
+
+ validation {
+ error_message = "Cannot use Instance Number 43 (HA port number) or 89 (Windows Remote Desktop Services)."
+ condition = !can(regex("(43|89)", var.sap_nwas_abap_pas_instance_no))
+ }
+
+}
+
+variable "map_os_image_regex" {
+
+  description = "Map of OS Image regex patterns, used to identify the latest OS Image for each OS major.minor version"
+
+ type = map(any)
+
+ default = {
+
+ rhel-8-4 = {
+ publisher = "RedHat"
+ offer = "RHEL"
+ sku = "84-gen2"
+ },
+
+ rhel-8-1-sap-ha = {
+ publisher = "RedHat"
+ offer = "RHEL-SAP-HA"
+ sku = "81sapha-gen2"
+ },
+
+ rhel-8-2-sap-ha = {
+ publisher = "RedHat"
+ offer = "RHEL-SAP-HA"
+ sku = "82sapha-gen2"
+ },
+
+ rhel-8-4-sap-ha = {
+ publisher = "RedHat"
+ offer = "RHEL-SAP-HA"
+ sku = "84sapha-gen2"
+ },
+
+ rhel-8-1-sap-applications = {
+ publisher = "RedHat"
+ offer = "RHEL-SAP-APPS"
+ sku = "81sapapps-gen2"
+ },
+
+ rhel-8-2-sap-applications = {
+ publisher = "RedHat"
+ offer = "RHEL-SAP-APPS"
+ sku = "82sapapps-gen2"
+ },
+
+ rhel-8-4-sap-applications = {
+ publisher = "RedHat"
+ offer = "RHEL-SAP-APPS"
+ sku = "84sapapps-gen2"
+ }
+
+ }
+
+}
+
+
+
+variable "map_host_specifications" {
+  description = "Map of host specifications for SAP HANA single node install"
+ type = map(any)
+
+ default = {
+
+ xsmall_256gb = {
+
+ hana-p = { // Hostname
+
+ sap_host_type = "hana_primary" # hana_primary, nwas_ascs, nwas_pas, nwas_aas
+ vm_instance = "Standard_D32s_v5"
+ disable_ip_anti_spoofing = false
+
+ storage_definition = [
+
+ {
+ name = "hana_data"
+ mountpoint = "/hana/data"
+ disk_count = 4
+ disk_size = 64
+ disk_type = "P6"
+ #disk_iops =
+ filesystem_type = "xfs"
+ #lvm_lv_name =
+ #lvm_lv_stripes =
+ #lvm_lv_stripe_size =
+ #lvm_vg_name =
+ #lvm_vg_options =
+ #lvm_vg_physical_extent_size =
+ #lvm_pv_device =
+ #lvm_pv_options =
+ #nfs_path =
+ #nfs_server =
+ #nfs_filesystem_type =
+ #nfs_mount_options =
+ },
+ {
+ name = "hana_log"
+ mountpoint = "/hana/log"
+ disk_count = 3
+ disk_size = 128
+ disk_type = "P10"
+ filesystem_type = "xfs"
+ },
+ {
+ name = "hana_shared"
+ mountpoint = "/hana/shared"
+ disk_size = 256
+ disk_type = "P15"
+ filesystem_type = "xfs"
+ },
+ {
+ name = "swap"
+ mountpoint = "/swapfile"
+ disk_size = 2
+ filesystem_type = "swap"
+ },
+ {
+ name = "software"
+ mountpoint = "/software"
+ disk_size = 128
+ disk_type = "P10"
+ filesystem_type = "xfs"
+ }
+
+ ]
+
+ }
+
+ }
+
+ }
+
+}
diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/msazure_vm/tf_template/tf_template_outputs.tf b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/msazure_vm/tf_template/tf_template_outputs.tf
new file mode 100644
index 0000000..893d108
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/msazure_vm/tf_template/tf_template_outputs.tf
@@ -0,0 +1,61 @@
+
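+# Consumed by execute_main.yml: each list entry maps onto ansible.builtin.add_host parameters (name, groups, ansible_host, ansible_user)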
+output "sap_host_list" {
+ value = [
+ for key in module.run_host_provision_module: {
+ "output_host_name" : key.output_host_name ,
+ "output_host_ip" : key.output_host_private_ip ,
+ "output_host_os_user" : "root" ,
+ "output_ansible_inventory_group" : var.map_host_specifications[var.sap_vm_provision_host_specification_plan][key.output_host_name].sap_host_type
+# "output_ansible_inventory_group" : can(regex("^hana.*",key.output_host_name)) ? "hana_primary" : can(regex("^nw.*",key.output_host_name)) ? can(regex(".*ascs.*",key.output_host_name)) ? "nwas_ascs" : can(regex(".*pas.*",key.output_host_name)) ? "nwas_pas" : can(regex(".*aas.*",key.output_host_name)) ? "nwas_aas" : "ERROR" : "ERROR"
+ }
+ ]
+}
+
+
+output "bastion_os_user" {
+ value = var.sap_vm_provision_bastion_user
+}
+
+output "sap_vm_provision_bastion_public_ip" {
+ value = module.run_bastion_inject_module.output_bastion_ip
+}
+
+output "bastion_port" {
+ value = var.sap_vm_provision_bastion_ssh_port
+}
+
+
+output "sap_vm_provision_nfs_mount_point" {
+ value = try("${module.run_host_nfs_module.output_nfs_fqdn}:/", "")
+}
+
+output "sap_vm_provision_nfs_mount_point_separate_sap_transport_dir" {
+ value = try("${module.run_host_nfs_module.output_nfs_fqdn}:/", "")
+}
+
+output "sap_vm_provision_nfs_mount_point_type" {
+ value = "nfs4"
+}
+
+output "sap_vm_provision_nfs_mount_point_opts" {
+ value = "nfsvers=4.1,rsize=262144,wsize=262144,hard,timeo=600,retrans=2,noatime,proto=tcp,namlen=255"
+}
+
+
+##############################################################
+# Export SSH key to file on local
+##############################################################
+
+# Use path object to store key files temporarily in root of execution - https://www.terraform.io/docs/language/expressions/references.html#filesystem-and-workspace-info
+resource "local_file" "bastion_rsa" {
+ content = module.run_account_bootstrap_module.output_bastion_private_ssh_key
+ filename = "${path.root}/ssh/bastion_rsa"
+ file_permission = "0400"
+}
+
+# Use path object to store key files temporarily in root of execution - https://www.terraform.io/docs/language/expressions/references.html#filesystem-and-workspace-info
+resource "local_file" "hosts_rsa" {
+ content = module.run_account_bootstrap_module.output_host_private_ssh_key
+ filename = "${path.root}/ssh/hosts_rsa"
+ file_permission = "0400"
+}
diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ovirt_vm/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ovirt_vm/execute_main.yml
new file mode 100644
index 0000000..5ebbc22
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/ovirt_vm/execute_main.yml
@@ -0,0 +1,5 @@
+---
+
+- name: Ansible to Terraform notification
+ ansible.builtin.debug:
+ msg: "Ansible to Terraform is not implemented for this Infrastructure Platform"
diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/vmware_vm/execute_main.yml b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/vmware_vm/execute_main.yml
new file mode 100644
index 0000000..b016c0b
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/vmware_vm/execute_main.yml
@@ -0,0 +1,183 @@
+---
+
+- name: Ansible to Terraform notification
+ ansible.builtin.debug:
+ msg: "Ansible to Terraform requires re-work for this Infrastructure Platform"
+
+
+# Requires re-work: the VMware Terraform Provider uses the VMware Managed Object Management Interface (VMOMI) SOAP APIs,
+# while Ansible uses the newer VMware vSphere REST APIs. This impacts the provisioning approach.
+
+- name: Ansible Task block for Terraform apply of multiple Terraform Modules
+ when: (rework_test | default(false))
+ block:
+
+ # Do not use ansible.builtin.copy as this will cause error 'not writable' on localhost (even if user has permissions)
+ - name: Copy Terraform Template files to temporary directory in current Ansible Playbook directory
+ ansible.builtin.shell: |
+ mkdir -p {{ sap_vm_provision_terraform_work_dir_path }}
+ cp -r {{ role_path }}/tasks/platform_ansible_to_terraform/{{ sap_vm_provision_iac_platform }}/tf_template/* {{ sap_vm_provision_terraform_work_dir_path }}
+
+ - name: Terraform Template for SAP - VMware
+ register: terraform_template1_result
+ cloud.terraform.terraform:
+ project_path: "{{ sap_vm_provision_terraform_work_dir_path }}"
+ state: "{{ sap_vm_provision_terraform_state }}"
+ force_init: true
+ complex_vars: true
+ variables:
+ vmware_vcenter_server: "{{ sap_vm_provision_vmware_vcenter_hostname }}"
+ vmware_vcenter_user: "{{ sap_vm_provision_vmware_vcenter_user }}"
+ vmware_vcenter_user_password: "{{ sap_vm_provision_vmware_vcenter_password }}"
+
+ vmware_vsphere_datacenter_compute_cluster_folder_name: "{{ sap_vm_provision_vmware_vm_folder_name }}"
+ vmware_vsphere_datacenter_compute_cluster_name: "{{ sap_vm_provision_vmware_vm_cluster_name }}"
+ vmware_vsphere_datacenter_compute_cluster_host_fqdn: "{{ sap_vm_provision_vmware_vm_cluster_host_name }}"
+
+ # vmware_vsphere_datacenter_name: "{{ }}"
+ # vmware_vsphere_datacenter_network_primary_name: "{{ }}"
+ vmware_vsphere_datacenter_storage_datastore_name: "{{ sap_vm_provision_vmware_vm_cluster_datastore_name }}"
+
+ vmware_vm_template_name: "{{ sap_vm_provision_vmware_vm_template_name }}"
+
+ sap_vm_provision_resource_prefix: "{{ sap_vm_provision_resource_prefix }}"
+ sap_vm_provision_dns_root_domain: "{{ sap_vm_provision_dns_root_domain }}"
+ map_os_image_regex: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_os_image_dictionary') }}"
+ map_host_specifications: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary') }}"
+ sap_vm_provision_host_specification_plan: "{{ sap_vm_provision_host_specification_plan }}"
+
+ sap_vm_provision_ibmcloud_vs_host_os_image: "{{ sap_vm_provision_ibmcloud_vs_host_os_image }}"
+
+ sap_software_download_directory: "{{ sap_software_download_directory }}"
+ sap_hana_install_instance_nr: "{{ sap_hana_install_instance_nr | default('') }}"
+ sap_nwas_abap_ascs_instance_no: "{{ sap_swpm_ascs_instance_nr | default('') }}"
+ sap_nwas_abap_pas_instance_no: "{{ sap_swpm_pas_instance_nr | default('') }}"
+
+ - name: Terraform Template output
+ ansible.builtin.debug:
+ var: terraform_template1_result
+
+
+ # - name: Execute Ansible Role cloud.terraform.inventory_from_outputs
+ # register: terraform_output_to_ansible_inventory
+ # ansible.builtin.include_role:
+ # name: cloud.terraform.inventory_from_outputs
+ # vars:
+ # project_path: "{{ sap_vm_provision_terraform_work_dir_path }}"
+ # mapping_variables:
+ # host_list: sap_host_list
+ # name: output_host_name
+ # ip: output_host_ip
+ # user: output_host_os_user
+ # group: output_ansible_inventory_group
+
+
+ - name: Read outputs from project path
+ when: sap_vm_provision_terraform_state == "present"
+ cloud.terraform.terraform_output:
+ project_path: "{{ sap_vm_provision_terraform_work_dir_path }}"
+ register: terraform_output_project_path
+
+ - name: Add hosts from terraform_output to the group defined in terraform_output
+ when: sap_vm_provision_terraform_state == "present"
+ register: terraform_add_hosts
+ ansible.builtin.add_host:
+ name: "{{ item['output_host_name'] }}"
+ groups: "{{ item['output_ansible_inventory_group'] }}"
+ ansible_host: "{{ item['output_host_ip'] }}"
+ ansible_user: "{{ item['output_host_os_user'] }}"
+ ansible_ssh_private_key_file: "{{ sap_vm_provision_terraform_work_dir_path }}/ssh/hosts_rsa"
+ ansible_ssh_common_args: -o ConnectTimeout=180 -o ControlMaster=auto -o ControlPersist=3600s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ForwardX11=no -o ProxyCommand='ssh -W %h:%p {{ terraform_output.outputs['bastion_os_user'].value }}@{{ terraform_output.outputs['sap_vm_provision_bastion_public_ip'].value }} -p {{ terraform_output.outputs['bastion_port'].value }} -i {{ sap_vm_provision_terraform_work_dir_path }}/ssh/bastion_rsa -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
+ loop: "{{ terraform_output.outputs['sap_host_list'].value }}"
+ vars:
+ # even skipped tasks register variables, so we need to choose one explicitly
+ terraform_output: "{{ (terraform_output_project_path is defined and terraform_output_project_path is success) |
+ ternary(terraform_output_project_path, terraform_output_state_file) }}"
+
+
+# Cannot override any variables from extravars input, see https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#understanding-variable-precedence
+# Ensure no default value exists for any prompted variable before execution of Ansible Playbook
+
+ - name: Set fact to hold all inventory hosts in all groups
+ ansible.builtin.set_fact:
+ groups_merged_list: "{{ [ [ groups['hana_primary'] | default([]) ] , [ groups['hana_secondary'] | default([]) ] , [ groups['nwas_ascs'] | default([]) ] , [ groups['nwas_ers'] | default([]) ] , [ groups['nwas_pas'] | default([]) ] , [ groups['nwas_aas'] | default([]) ] , [ groups['anydb_primary'] | default([]) ] , [ groups['anydb_secondary'] | default([]) ] ] | flatten | select() }}"
+
+ - name: Set facts for all hosts - use facts from localhost for host specification dictionary
+ when: sap_vm_provision_terraform_state == "present"
+ ansible.builtin.set_fact:
+ host_specifications_dictionary: "{{ lookup('ansible.builtin.vars', 'sap_vm_provision_' + sap_vm_provision_iac_platform + '_host_specifications_dictionary') }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ loop: "{{ groups_merged_list }}"
+ vars:
+ # even skipped tasks register variables, so we need to choose one explicitly
+ terraform_output: "{{ (terraform_output_project_path is defined and terraform_output_project_path is success) |
+ ternary(terraform_output_project_path, terraform_output_state_file) }}"
+
+ - name: Set Ansible Vars
+ register: register_set_ansible_vars
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars.yml
+ args:
+ apply:
+ delegate_to: "{{ item }}"
+ run_once: true # Otherwise tasks will run twice per host
+ loop: "{{ groups_merged_list }}"
+
+ # Required to collect the remote host's facts for further processing
+ # in the following steps
+ - name: Gather host facts
+ ansible.builtin.setup:
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ loop: "{{ groups_merged_list }}"
+
+ # Must be set to short hostname,
+ # so that command 'hostname' and 'hostname -s' return the short hostname only;
+ # otherwise may cause error with SAP SWPM using name.domain.com.domain.com
+ - name: Change system hostname (must be set to short name and not FQDN, as required by SAP)
+ ansible.builtin.hostname:
+ name: "{{ inventory_hostname_short }}"
+ delegate_to: "{{ item }}"
+ delegate_facts: true
+ loop: "{{ groups_merged_list }}"
+
+ - name: Set /etc/hosts
+ register: register_etc_hosts_file
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts.yml
+ args:
+ apply:
+ delegate_to: "{{ item }}"
+ run_once: true # Otherwise tasks will run twice per host
+ loop: "{{ groups_merged_list }}"
+
+ - name: Set /etc/hosts for HA
+ register: register_etc_hosts_file_ha
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_ha.yml
+ when:
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"] | length>0)) or (groups["nwas_ers"] is defined and (groups["nwas_ers"] | length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"] | length>0))
+ args:
+ apply:
+ delegate_to: "{{ item }}"
+ run_once: true # Otherwise tasks will run twice per host
+ loop: "{{ groups_merged_list }}"
+
+ - name: Set /etc/hosts for Scale-Out
+ register: register_etc_hosts_file_scaleout
+ ansible.builtin.include_tasks:
+ file: common/set_etc_hosts_scaleout.yml
+ when:
+ - (groups["hana_primary"] is defined and (groups["hana_primary"] | length>0)) and (sap_hana_scaleout_active_coordinator is defined or sap_hana_scaleout_active_worker is defined or sap_hana_scaleout_standby is defined)
+ args:
+ apply:
+ delegate_to: "{{ item }}"
+ run_once: true # Otherwise tasks will run twice per host
+ loop: "{{ groups_merged_list }}"
+
+ - name: Set vars for sap_storage_setup Ansible Role
+ when: sap_vm_provision_terraform_state == "present"
+ register: register_ansible_vars_storage
+ ansible.builtin.include_tasks:
+ file: common/set_ansible_vars_storage.yml
diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/vmware_vm/tf_template/tf_template.tf b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/vmware_vm/tf_template/tf_template.tf
new file mode 100644
index 0000000..31194db
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/vmware_vm/tf_template/tf_template.tf
@@ -0,0 +1,85 @@
+# Terraform declaration
+terraform {
+ required_version = ">= 1.0, <= 1.5.7"
+ required_providers {
+ vsphere = {
+#      source = "localdomain/provider/vsphere" // Local, on macOS the path to place files would be $HOME/.terraform.d/plugins/localdomain/provider/vsphere/1.xx.xx/darwin_amd64
+ source = "hashicorp/vsphere"
+ version = ">=2.6.0"
+ }
+ }
+}
+
+# Terraform Provider declaration
+provider "vsphere" {
+
+ # Define Provider inputs from given Terraform Variables
+ user = var.vmware_vcenter_user
+ password = var.vmware_vcenter_user_password
+ vsphere_server = var.vmware_vcenter_server
+
+ # Self-signed certificate
+ allow_unverified_ssl = true
+
+}
+
+
+module "run_host_bootstrap_module" {
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//vmware_vm/host_bootstrap?ref=main"
+
+}
+
+
+module "run_host_provision_module" {
+
+ source = "github.com/sap-linuxlab/terraform.modules_for_sap//vmware_vm/host_provision?ref=main"
+
+ # Set Terraform Module Variables using Terraform Variables at runtime
+
+ module_var_resource_prefix = var.sap_vm_provision_resource_prefix
+
+ module_var_host_public_ssh_key = module.run_host_bootstrap_module.output_host_public_ssh_key
+ module_var_host_private_ssh_key = module.run_host_bootstrap_module.output_host_private_ssh_key
+
+
+ module_var_vmware_vcenter_server = var.vmware_vcenter_server
+ module_var_vmware_vcenter_user = var.vmware_vcenter_user
+ module_var_vmware_vcenter_user_password = var.vmware_vcenter_user_password
+
+ module_var_vmware_vsphere_datacenter_name = var.vmware_vsphere_datacenter_name
+ module_var_vmware_vsphere_datacenter_compute_cluster_name = var.vmware_vsphere_datacenter_compute_cluster_name
+ module_var_vmware_vsphere_datacenter_compute_cluster_host_fqdn = var.vmware_vsphere_datacenter_compute_cluster_host_fqdn
+
+ module_var_vmware_vsphere_datacenter_compute_cluster_folder_name = var.vmware_vsphere_datacenter_compute_cluster_folder_name
+ module_var_vmware_vsphere_datacenter_storage_datastore_name = var.vmware_vsphere_datacenter_storage_datastore_name
+ module_var_vmware_vsphere_datacenter_network_primary_name = var.vmware_vsphere_datacenter_network_primary_name
+
+ module_var_vmware_vm_template_name = var.vmware_vm_template_name
+
+ module_var_vmware_vm_dns_root_domain_name = var.sap_vm_provision_dns_root_domain
+
+ # Set Terraform Module Variables using for_each loop on a map Terraform Variable with nested objects
+
+ for_each = toset([
+ for key, value in var.map_host_specifications[var.sap_vm_provision_host_specification_plan] : key
+ ])
+
+ module_var_vmware_vm_hostname = each.key
+
+ module_var_vmware_vm_compute_cpu_threads = var.map_host_specifications[var.sap_vm_provision_host_specification_plan][each.key].vmware_vm_cpu_threads
+ module_var_vmware_vm_compute_ram_gb = var.map_host_specifications[var.sap_vm_provision_host_specification_plan][each.key].vmware_vm_memory_gib
+
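+  # Pass only block storage entries to the module: keep items that define disk_size and skip entries that set swap_path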
+ module_var_storage_definition = [ for storage_item in var.map_host_specifications[var.sap_vm_provision_host_specification_plan][each.key]["storage_definition"] : storage_item if contains(keys(storage_item),"disk_size") && try(storage_item.swap_path,"") == "" ]
+
+ module_var_web_proxy_enable = false
+ module_var_os_vendor_enable = false
+
+ module_var_web_proxy_url = ""
+ module_var_web_proxy_exclusion = ""
+
+ module_var_os_vendor_account_user = ""
+ module_var_os_vendor_account_user_passcode = ""
+ module_var_os_systems_mgmt_host = ""
+
+}
diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/vmware_vm/tf_template/tf_template_input_vars.tf b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/vmware_vm/tf_template/tf_template_input_vars.tf
new file mode 100644
index 0000000..f7f7585
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/vmware_vm/tf_template/tf_template_input_vars.tf
@@ -0,0 +1,168 @@
+
+variable "vmware_vcenter_user" {
+ description = "Target vCenter: User (e.g. Administrator@vsphere.local)"
+}
+
+variable "vmware_vcenter_user_password" {
+ description = "Target vCenter: User Password"
+}
+
+variable "vmware_vcenter_server" {
+ description = "Target vCenter: Host Server FQDN (e.g. vcenter.domain.local)"
+}
+
+variable "vmware_vsphere_datacenter_name" {
+ description = "Target vSphere Datacenter name"
+}
+
+variable "vmware_vsphere_datacenter_compute_cluster_name" {
+ description = "Target vSphere Datacenter Compute Cluster name, to host the VMware Virtual Machine"
+}
+
+variable "vmware_vsphere_datacenter_compute_cluster_host_fqdn" {
+ description = "Target vSphere Datacenter Compute specified vSphere Host FQDN, to host the VMware Virtual Machine"
+}
+
+variable "vmware_vsphere_datacenter_compute_cluster_folder_name" {
+ description = "Target vSphere Datacenter Compute Cluster Folder name, the logical directory for the VMware Virtual Machine"
+}
+
+variable "vmware_vsphere_datacenter_storage_datastore_name" {}
+
+variable "vmware_vsphere_datacenter_network_primary_name" {}
+
+variable "vmware_vm_template_name" {
+ description = "VMware VM Template name to use for provisioning"
+}
+
+
+variable "sap_vm_provision_resource_prefix" {
+ description = "Prefix to resource names"
+}
+
+variable "sap_vm_provision_dns_root_domain" {
+ description = "Root Domain for Private DNS used with the Virtual Server"
+}
+
+variable "sap_vm_provision_host_specification_plan" {
+  description = "Available host specification plans: small_256gb. This value selects a nested set of host specifications from the map_host_specifications variable, and will alter host provisioning."
+}
+
+variable "sap_software_download_directory" {
+ description = "Mount point for downloads of SAP Software"
+
+ validation {
+ error_message = "Directory must start with forward slash."
+ condition = can(regex("^/", var.sap_software_download_directory))
+ }
+
+}
+
+
+variable "sap_hana_install_instance_nr" {
+ description = "Ansible - SAP HANA install: Instance Number (e.g. 90)"
+
+ validation {
+ error_message = "Cannot use Instance Number 43 (HA port number) or 89 (Windows Remote Desktop Services)."
+ condition = !can(regex("(43|89)", var.sap_hana_install_instance_nr))
+ }
+
+}
+
+variable "sap_nwas_abap_ascs_instance_no" {
+ description = "Ansible - SAP NetWeaver AS (ABAP) - ABAP Central Services (ASCS) instance number"
+
+ validation {
+ error_message = "Cannot use Instance Number 43 (HA port number) or 89 (Windows Remote Desktop Services)."
+ condition = !can(regex("(43|89)", var.sap_nwas_abap_ascs_instance_no))
+ }
+
+}
+
+variable "sap_nwas_abap_pas_instance_no" {
+ description = "Ansible - SAP NetWeaver AS (ABAP) - Primary Application Server instance number"
+
+ validation {
+ error_message = "Cannot use Instance Number 43 (HA port number) or 89 (Windows Remote Desktop Services)."
+ condition = !can(regex("(43|89)", var.sap_nwas_abap_pas_instance_no))
+ }
+
+}
+
+
+variable "map_host_specifications" {
+
+  description = "Map of host specifications for SAP BW/4HANA single node install"
+
+ type = map(any)
+
+ default = {
+
+ small_256gb = {
+
+ bwh01 = { // Hostname
+        sap_host_type = "hana_primary" # assumed inventory group for this single-node host (options: hana_primary, nwas_ascs, nwas_pas, nwas_aas); referenced by the sap_host_list output
+        vmware_vm_cpu_threads = 32
+        vmware_vm_memory_gib = 256
+ storage_definition = [
+ {
+ name = "hana_data"
+ mountpoint = "/hana/data"
+ disk_size = 512
+ #disk_iops =
+ filesystem_type = "xfs"
+ #lvm_lv_name =
+ #lvm_lv_stripes =
+ #lvm_lv_stripe_size =
+ #lvm_vg_name =
+ #lvm_vg_options =
+ #lvm_vg_physical_extent_size =
+ #lvm_pv_device =
+ #lvm_pv_options =
+ #nfs_path =
+ #nfs_server =
+ #nfs_filesystem_type =
+ #nfs_mount_options =
+ },
+ {
+ name = "hana_log"
+ mountpoint = "/hana/log"
+ disk_size = 128
+ filesystem_type = "xfs"
+ },
+ {
+ name = "hana_shared"
+ mountpoint = "/hana/shared"
+ disk_size = 256
+ filesystem_type = "xfs"
+ },
+ {
+ name = "usr_sap"
+ mountpoint = "/usr/sap"
+ disk_size = 96
+ filesystem_type = "xfs"
+ },
+ {
+ name = "sapmnt"
+ mountpoint = "/sapmnt"
+ disk_size = 96
+ filesystem_type = "xfs"
+ },
+ {
+ name = "swap"
+ mountpoint = "/swap"
+ disk_size = 32
+ filesystem_type = "swap"
+ },
+ {
+ name = "software"
+ mountpoint = "/software"
+ disk_size = 100
+ filesystem_type = "xfs"
+ }
+ ]
+ }
+
+
+ }
+ }
+}
diff --git a/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/vmware_vm/tf_template/tf_template_outputs.tf b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/vmware_vm/tf_template/tf_template_outputs.tf
new file mode 100644
index 0000000..8014e75
--- /dev/null
+++ b/roles/sap_vm_provision/tasks/platform_ansible_to_terraform/vmware_vm/tf_template/tf_template_outputs.tf
@@ -0,0 +1,24 @@
+
+output "sap_host_list" {
+ value = [
+ for key in module.run_host_provision_module: {
+ "output_host_name" : key.output_host_name ,
+ "output_host_ip" : key.output_host_private_ip ,
+ "output_host_os_user" : "root" ,
+ "output_ansible_inventory_group" : var.map_host_specifications[var.sap_vm_provision_host_specification_plan][key.output_host_name].sap_host_type
+# "output_ansible_inventory_group" : can(regex("^hana.*",key.output_host_name)) ? "hana_primary" : can(regex("^nw.*",key.output_host_name)) ? can(regex(".*ascs.*",key.output_host_name)) ? "nwas_ascs" : can(regex(".*pas.*",key.output_host_name)) ? "nwas_pas" : can(regex(".*aas.*",key.output_host_name)) ? "nwas_aas" : "ERROR" : "ERROR"
+ }
+ ]
+}
+
+
+##############################################################
+# Export SSH key to file on local
+##############################################################
+
+# Use path object to store key files temporarily in root of execution - https://www.terraform.io/docs/language/expressions/references.html#filesystem-and-workspace-info
+resource "local_file" "hosts_rsa" {
+ content = module.run_host_bootstrap_module.output_host_private_ssh_key
+ filename = "${path.root}/ssh/hosts_rsa"
+ file_permission = "0400"
+}
diff --git a/roles/sap_vm_temp_vip/README.md b/roles/sap_vm_temp_vip/README.md
new file mode 100644
index 0000000..c72b8b0
--- /dev/null
+++ b/roles/sap_vm_temp_vip/README.md
@@ -0,0 +1,81 @@
+`Beta`
+
+# sap_vm_temp_vip Ansible Role
+
+Ansible Role for assignment of Temporary Virtual IP (VIP) to OS Network Interface prior to Linux Pacemaker ownership.
+
+Depending on the detected Infrastructure Platform, this Ansible Role assigns a Virtual IP Address to the OS Network Interface.
+
+
+## Functionality
+
+Hosts allocated for SAP Software High Availability are configured with a temporary Virtual IP on the OS Network Interface, which allows Linux Pacemaker to be installed after the SAP Software installation has concluded (the best practice for Linux Pacemaker). When an Infrastructure Platform with specific requirements is detected (e.g. Load Balancers), bespoke actions are performed.
+
+
+## Scope
+
+Only hosts required for High Availability (such as SAP HANA Primary node, SAP NetWeaver ASCS/ERS) should use this Ansible Role.
+
+Assumptions are made based upon the default High Availability configuration for a given Infrastructure Platform (e.g. using the Linux Pacemaker `IPaddr2` resource agent).
+
+
+## Requirements
+
+### Target hosts
+
+**OS Versions:**
+- Red Hat Enterprise Linux 8.2+
+- SUSE Linux Enterprise Server 15 SP3+
+
+### Execution/Controller host
+
+**Dependencies:**
+- OS Packages
+ - Python 3.9.7+ (i.e. CPython distribution)
+- Python Packages
+ - None
+- Ansible
+ - Ansible Core 2.12.0+
+ - Ansible Collections:
+ - None
+
+
+## Execution
+
+### Sample execution
+
+For further information, see the [sample Ansible Playbooks in `/playbooks`](../playbooks/).
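+
+A minimal sketch of invoking this Ansible Role from a playbook is shown below. The target host group and the VIP value are illustrative assumptions; the `sap_vm_temp_vip_hana_primary` variable name is taken from the role defaults:
+
+```yaml
+---
+- name: Assign temporary Virtual IP prior to Linux Pacemaker ownership
+  hosts: hana_primary
+  become: true
+  tasks:
+
+    - name: Execute Ansible Role sap_vm_temp_vip
+      ansible.builtin.include_role:
+        name: sap_vm_temp_vip
+      vars:
+        sap_vm_temp_vip_hana_primary: "192.168.1.90" # assumed example VIP
+```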
+
+### Suggested execution sequence
+
+It is advised this Ansible Role is used only for High Availability and executed prior to execution of:
+- sap_hana_install
+- sap_swpm
+
+No other Ansible Roles are required to be executed prior to this Ansible Role.
+
+### Summary of execution flow
+
+- Identify IPv4 Address with CIDR and Broadcast Address
+- If SAP AnyDB or SAP NetWeaver, assign Virtual IP to OS Network Interface. If SAP HANA, skip
+- Start temporary listener for SAP HANA, SAP AnyDB or SAP NetWeaver when using Load Balancers _(GCP, IBM Cloud, MS Azure)_
+
+### Tags to control execution
+
+There are no tags used to control the execution of this Ansible Role.
+
+
+## License
+
+Apache 2.0
+
+
+## Authors
+
+Sean Freeman
+
+---
+
+## Ansible Role Input Variables
+
+Please first check the [/defaults parameters file](./defaults/main.yml).
diff --git a/roles/sap_vm_temp_vip/defaults/main.yml b/roles/sap_vm_temp_vip/defaults/main.yml
new file mode 100644
index 0000000..8ab3cd8
--- /dev/null
+++ b/roles/sap_vm_temp_vip/defaults/main.yml
@@ -0,0 +1,8 @@
+---
+
+sap_vm_temp_vip_hana_primary: "{{ sap_ha_pacemaker_cluster_vip_hana_primary_ip_address | default('') }}"
+sap_vm_temp_vip_anydb_primary: ""
+sap_vm_temp_vip_nwas_abap_ascs: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | default('') }}"
+sap_vm_temp_vip_nwas_abap_ers: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | default('') }}"
+# sap_vm_temp_vip_nwas_abap_pas: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_pas_ip_address | default('') }}"
+# sap_vm_temp_vip_nwas_abap_aas: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_aas_ip_address | default('') }}"
diff --git a/roles/sap_vm_temp_vip/meta/main.yml b/roles/sap_vm_temp_vip/meta/main.yml
new file mode 100644
index 0000000..b0ef957
--- /dev/null
+++ b/roles/sap_vm_temp_vip/meta/main.yml
@@ -0,0 +1,13 @@
+---
+galaxy_info:
+ namespace: community
+ author: Sean Freeman
+ description: SAP VM Temporary Virtual IP configuration
+ company: IBM
+ license: Apache-2.0
+ min_ansible_version: 2.12
+ platforms:
+ - name: EL
+ versions: [8, 9]
+ galaxy_tags: ['sap', 'aws', 'gcp', 'msazure', 'ibmcloud', 'ibmpower', 'ovirt', 'kubevirt', 'vmware', 'rhel', 'redhat', 'sles', 'suse']
+dependencies: []
diff --git a/roles/sap_vm_temp_vip/meta/runtime.yml b/roles/sap_vm_temp_vip/meta/runtime.yml
new file mode 100644
index 0000000..c2ea658
--- /dev/null
+++ b/roles/sap_vm_temp_vip/meta/runtime.yml
@@ -0,0 +1,2 @@
+---
+requires_ansible: '>=2.12.0'
diff --git a/roles/sap_vm_temp_vip/tasks/identify_network_interface.yml b/roles/sap_vm_temp_vip/tasks/identify_network_interface.yml
new file mode 100644
index 0000000..ab56363
--- /dev/null
+++ b/roles/sap_vm_temp_vip/tasks/identify_network_interface.yml
@@ -0,0 +1,31 @@
+---
+
+# RHEL uses NetworkManager as default
+
+# SLES and NetworkManager
+# NetworkManager is only supported by SUSE for desktop workloads with SLED or the Workstation extension.
+# NetworkManager is not supported by SUSE for server workloads.
+# wicked is used for network configuration instead; all server certifications assume wicked, and using NetworkManager may invalidate them.
+# wicked does not have an existing Ansible Module/Role
+# source - https://documentation.suse.com/sles/15-SP5/html/SLES-all/cha-nm.html
+
+
+# - name: Find the primary OS network interface adapter
+# delegate_to: "{{ host_node }}"
+# delegate_facts: true
+# ansible.builtin.shell:
+# # Find network adapter - Works only if 1 adapter is present
+# #ACTIVE_NETWORK_ADAPTER=$(ip -o link show | awk '{print $2,$9}' | grep UP | awk -F: '{print $1}')
+
+# # Find network adapter - identify the adapter, by showing which is used for the Default Gateway route
+# # https://serverfault.com/questions/47915/how-do-i-get-the-default-gateway-in-linux-given-the-destination
+
+# # Added if statement to catch RHEL installations with route table multiple default entries. EXAMPLE:
+# ### default via 10.243.1.1 dev eth0
+# ### default via 10.243.1.1 dev eth0 proto dhcp metric 100
+# if [[ $(ip route show default 0.0.0.0/0) == *$'\n'* ]]; then
+# ACTIVE_NETWORK_ADAPTER=$(ip route show default 0.0.0.0/0 | awk '/default/ && !/metric/ {print $5}')
+# ACTIVE_NETWORK_ADAPTER=${ACTIVE_NETWORK_ADAPTER%;*}
+# else
+# ACTIVE_NETWORK_ADAPTER=$(ip route show default 0.0.0.0/0 | awk '/default/ {print $5}')
+# fi
diff --git a/roles/sap_vm_temp_vip/tasks/main.yml b/roles/sap_vm_temp_vip/tasks/main.yml
new file mode 100644
index 0000000..cdcd579
--- /dev/null
+++ b/roles/sap_vm_temp_vip/tasks/main.yml
@@ -0,0 +1,14 @@
+---
+
+- name: Setup temporary Virtual IP (VIP)
+ block:
+
+ # - name: Identify OS Primary Network Interface
+ # ansible.builtin.include_tasks: "identify_network_interface.yml"
+
+ - name: Execute temporary set of a Virtual IP (VIP) prior to Linux Pacemaker ownership
+ ansible.builtin.include_tasks: "set_temp_vip.yml"
+
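+    # GCP, IBM Cloud and MS Azure front the Virtual IP with a Load Balancer health check probe; the conditions below detect these platforms from Ansible hardware (DMI) facts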
+ - name: Set Health Check Probe Listener for Virtual IP when Load Balancer
+ ansible.builtin.include_tasks: "set_temp_vip_lb_listener.yml"
+ when: (ansible_product_name == 'Google Compute Engine') or (ansible_chassis_asset_tag == 'ibmcloud') or (ansible_chassis_vendor == 'Microsoft Corporation' and ansible_product_name == 'Virtual Machine')
diff --git a/roles/sap_vm_temp_vip/tasks/set_temp_vip.yml b/roles/sap_vm_temp_vip/tasks/set_temp_vip.yml
new file mode 100644
index 0000000..193917a
--- /dev/null
+++ b/roles/sap_vm_temp_vip/tasks/set_temp_vip.yml
@@ -0,0 +1,126 @@
+---
+
+## Set Virtual IPs
+# for AWS VPC, must be outside of VPC Subnet CIDR Range
+# for MS Azure VNet, must be within the VNet Subnet CIDR Range attached to the Load Balancer
+# for GCP VPC, must be within the VNet Subnet CIDR Range attached to the Load Balancer
+# for IBM Cloud VPC, will automatically be within the VPC Subnet CIDR Range as Load Balancer owns/determines the Virtual IP; must not set VIP on the Host OS Network Interface as a secondary IP
+# for IBM Power IaaS VLAN on IBM Cloud, must be within the VLAN Subnet CIDR Range
+# for IBM PowerVM, must be within the VLAN Subnet CIDR Range
+
+
+# Use of Primary IP Address default netmask prefix and/or the broadcast is automatic for Linux Pacemaker
+# For AWS, this would be the VPC Subnet Netmask CIDR e.g. /24
+# For MS Azure, this would be the VNet Subnet Netmask CIDR e.g. /24
+# For GCP, this would be static Netmask CIDR /32 unless using custom OS Image - https://cloud.google.com/vpc/docs/create-use-multiple-interfaces#i_am_having_connectivity_issues_when_using_a_netmask_that_is_not_32
+
+- name: Set fact for Broadcast Address and Prefix of the Primary IP
+ ansible.builtin.set_fact:
+ ip_broadcast_address: "{{ ansible_default_ipv4.broadcast | default('') }}"
+ ip_cidr_prefix: "{{ ansible_default_ipv4.prefix | default('') }}"
+
+
+#### HA of HANA Primary/Secondary ####
+
+# Not required before SAP HANA installation or Linux Pacemaker installation, performed so the VIP connectivity can be tested
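+# The shell logic below prefers the primary interface's broadcast address, then its CIDR prefix, and falls back to a /32 host address when neither fact is available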
+- name: Append temporary Virtual IP (VIP) to network interface for SAP HANA, will be replaced by Linux Pacemaker IPaddr2 Resource Agent
+ ansible.builtin.shell: |
+ if [ "{{ ip_broadcast_address }}" = "" ] && [ "{{ ip_cidr_prefix }}" = "" ]
+ then
+ ip address add {{ sap_vm_temp_vip_hana_primary | regex_replace('/.*', '') }}/32 brd + dev eth0
+ elif [ "{{ ip_broadcast_address }}" != "" ]
+ then
+ ip address add {{ sap_vm_temp_vip_hana_primary | regex_replace('/.*', '') }} brd {{ ip_broadcast_address }} dev eth0
+ elif [ "{{ ip_cidr_prefix }}" != "" ]
+ then
+ ip address add {{ sap_vm_temp_vip_hana_primary | regex_replace('/.*', '') }}/{{ ip_cidr_prefix }} brd + dev eth0
+ fi
+ when:
+ - (groups["hana_secondary"] is defined and inventory_hostname in groups["hana_primary"]) and (groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0))
+ - not ansible_chassis_asset_tag == 'ibmcloud'
+ ignore_errors: true
+
+# Not required before SAP HANA installation or Linux Pacemaker installation, performed so the VIP connectivity can be tested
+- name: Append temporary Virtual IP (VIP) to network interface for SAP AnyDB, will be replaced by Linux Pacemaker IPaddr2 Resource Agent
+ ansible.builtin.shell: |
+ if [ "{{ ip_broadcast_address }}" = "" ] && [ "{{ ip_cidr_prefix }}" = "" ]
+ then
+ ip address add {{ sap_vm_temp_vip_anydb_primary | regex_replace('/.*', '') }}/32 brd + dev eth0
+ elif [ "{{ ip_broadcast_address }}" != "" ]
+ then
+ ip address add {{ sap_vm_temp_vip_anydb_primary | regex_replace('/.*', '') }} brd {{ ip_broadcast_address }} dev eth0
+ elif [ "{{ ip_cidr_prefix }}" != "" ]
+ then
+ ip address add {{ sap_vm_temp_vip_anydb_primary | regex_replace('/.*', '') }}/{{ ip_cidr_prefix }} brd + dev eth0
+ fi
+ when:
+ - (groups["anydb_secondary"] is defined and inventory_hostname in groups["anydb_primary"]) and (groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0))
+ - not ansible_chassis_asset_tag == 'ibmcloud'
+ ignore_errors: true
+
+
+#### HA of ASCS/ERS ####
+
+# Required before running SAP SWPM
+# Otherwise CSiManagerInterfaces.cpp will log the WARNING "The host with the name XXXXXX defined by SAPINST_USE_HOSTNAME is not a virtual host on the local host."
+# And if the Virtual Hostname / Virtual IP cannot be resolved, it will likely prevent SAP SWPM from completing the installation
+- name: Append temporary Virtual IP (VIP) to network interface for SAP NetWeaver ASCS, to be replaced by the Linux Pacemaker IPaddr2 Resource Agent
+ ansible.builtin.shell: |
+ if [ "{{ ip_broadcast_address }}" = "" ] && [ "{{ ip_cidr_prefix }}" = "" ]
+ then
+ ip address add {{ sap_vm_temp_vip_nwas_abap_ascs | regex_replace('/.*', '') }}/32 brd + dev eth0
+ elif [ "{{ ip_broadcast_address }}" != "" ]
+ then
+ ip address add {{ sap_vm_temp_vip_nwas_abap_ascs | regex_replace('/.*', '') }} brd {{ ip_broadcast_address }} dev eth0
+ elif [ "{{ ip_cidr_prefix }}" != "" ]
+ then
+ ip address add {{ sap_vm_temp_vip_nwas_abap_ascs | regex_replace('/.*', '') }}/{{ ip_cidr_prefix }} brd + dev eth0
+ fi
+ when:
+ - (groups["nwas_ers"] is defined and inventory_hostname in groups["nwas_ascs"]) and (groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0))
+ - not ansible_chassis_asset_tag == 'ibmcloud'
+ ignore_errors: true
+
+# Required before running SAP SWPM
+# Otherwise CSiManagerInterfaces.cpp will log the WARNING "The host with the name XXXXXX defined by SAPINST_USE_HOSTNAME is not a virtual host on the local host."
+# And if the Virtual Hostname / Virtual IP cannot be resolved, it will likely prevent SAP SWPM from completing the installation
+- name: Append temporary Virtual IP (VIP) to network interface for SAP NetWeaver ERS, to be replaced by the Linux Pacemaker IPaddr2 Resource Agent
+ ansible.builtin.shell: |
+ if [ "{{ ip_broadcast_address }}" = "" ] && [ "{{ ip_cidr_prefix }}" = "" ]
+ then
+ ip address add {{ sap_vm_temp_vip_nwas_abap_ers | regex_replace('/.*', '') }}/32 brd + dev eth0
+ elif [ "{{ ip_broadcast_address }}" != "" ]
+ then
+ ip address add {{ sap_vm_temp_vip_nwas_abap_ers | regex_replace('/.*', '') }} brd {{ ip_broadcast_address }} dev eth0
+ elif [ "{{ ip_cidr_prefix }}" != "" ]
+ then
+ ip address add {{ sap_vm_temp_vip_nwas_abap_ers | regex_replace('/.*', '') }}/{{ ip_cidr_prefix }} brd + dev eth0
+ fi
+ when:
+ - (groups["nwas_ers"] is defined and inventory_hostname in groups["nwas_ers"]) and (groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0))
+ - not ansible_chassis_asset_tag == 'ibmcloud'
+ ignore_errors: true
+
+
+#### HA of PAS/AAS [rare, comment out] ####
+
+# # Required before running SAP SWPM
+# # Otherwise CSiManagerInterfaces.cpp will provide WARNING "The host with the name XXXXXX defined by SAPINST_USE_HOSTNAME is not a virtual host on the local host."
+# # And if the Virtual Hostname / Virtual IP cannot resolve, it will likely prevent SAP SWPM from completing the installation
+# - name: Append temporary Virtual IP (VIP) to network interface for SAP NetWeaver PAS, will be replaced by Linux Pacemaker IPaddr2 Resource Agent
+# ansible.builtin.shell: ip address add {{ sap_vm_temp_vip_nwas_abap_pas | regex_replace('/.*', '') }}/24 brd + dev eth0
+# when:
+# - (groups["nwas_pas"] is defined and inventory_hostname in groups["nwas_pas"]) and (groups["nwas_pas"] is defined and (groups["nwas_pas"]|length>0))
+# - not ansible_chassis_asset_tag == 'ibmcloud'
+# ignore_errors: true
+
+# # Required before running SAP SWPM
+# # Otherwise CSiManagerInterfaces.cpp will provide WARNING "The host with the name XXXXXX defined by SAPINST_USE_HOSTNAME is not a virtual host on the local host."
+# # And if the Virtual Hostname / Virtual IP cannot resolve, it will likely prevent SAP SWPM from completing the installation
+# - name: Append temporary Virtual IP (VIP) to network interface for SAP NetWeaver AAS, will be replaced by Linux Pacemaker IPaddr2 Resource Agent
+# ansible.builtin.shell: ip address add {{ sap_vm_temp_vip_nwas_abap_aas | regex_replace('/.*', '') }}/24 brd + dev eth0
+# when:
+# - (groups["nwas_aas"] is defined and inventory_hostname in groups["nwas_aas"]) and (groups["nwas_aas"] is defined and (groups["nwas_aas"]|length>0))
+# - not ansible_chassis_asset_tag == 'ibmcloud'
+# ignore_errors: true
diff --git a/roles/sap_vm_temp_vip/tasks/set_temp_vip_lb_listener.yml b/roles/sap_vm_temp_vip/tasks/set_temp_vip_lb_listener.yml
new file mode 100644
index 0000000..9252c52
--- /dev/null
+++ b/roles/sap_vm_temp_vip/tasks/set_temp_vip_lb_listener.yml
@@ -0,0 +1,48 @@
+---
+
+- name: Install netcat and lsof utils
+ ansible.builtin.package:
+ name:
+ - nc
+ - lsof
+ state: present
+
+
+# Must use a while loop to avoid the netcat process ending too early
+# Required when using Load Balancers (i.e. Google Cloud, IBM Cloud, MS Azure)
+# Temporary listener port used for SAP HANA or SAP AnyDB is 55550
+# Temporary listener port used for SAP NetWeaver ASCS is 55551
+# Temporary listener port used for SAP NetWeaver ERS is 55552; must be different from the ASCS Health Check Port to avoid the ASCS VIP distributing to the ERS host
+
+- name: Start temporary netcat listener on port 55550 for SAP HANA or SAP AnyDB for 6 hours (until SAP installation is complete), responding to Load Balancer Health Check probes until Linux Pacemaker is started
+  ansible.builtin.shell: |
+    if ! lsof -Pi :55550 -sTCP:LISTEN -t >/dev/null ; then
+      nohup timeout 6h bash -c "while true; do nc -vvv -l -k 55550 ; done" >/dev/null 2>&1 &
+      sleep 2
+    fi
+ when:
+ - (groups["hana_secondary"] is defined and (groups["hana_secondary"]|length>0)) or (groups["anydb_secondary"] is defined and (groups["anydb_secondary"]|length>0))
+ - (groups["hana_secondary"] is defined and inventory_hostname in groups["hana_primary"]) or (groups["anydb_secondary"] is defined and inventory_hostname in groups["anydb_primary"])
+ - (ansible_product_name == 'Google Compute Engine') or (ansible_chassis_asset_tag == 'ibmcloud') or (ansible_chassis_vendor == 'Microsoft Corporation' and ansible_product_name == 'Virtual Machine')
+
+- name: Start temporary netcat listener on port 55551 for SAP NetWeaver ASCS for 6 hours (until SAP installation is complete), responding to Load Balancer Health Check probes until Linux Pacemaker is started
+  ansible.builtin.shell: |
+    if ! lsof -Pi :55551 -sTCP:LISTEN -t >/dev/null ; then
+      nohup timeout 6h bash -c "while true; do nc -vvv -l -k 55551 ; done" >/dev/null 2>&1 &
+      sleep 2
+    fi
+ when:
+ - (groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0))
+ - (groups["nwas_ers"] is defined and inventory_hostname in groups["nwas_ascs"])
+ - (ansible_product_name == 'Google Compute Engine') or (ansible_chassis_asset_tag == 'ibmcloud') or (ansible_chassis_vendor == 'Microsoft Corporation' and ansible_product_name == 'Virtual Machine')
+
+- name: Start temporary netcat listener on port 55552 for SAP NetWeaver ERS for 6 hours (until SAP installation is complete), responding to Load Balancer Health Check probes until Linux Pacemaker is started
+  ansible.builtin.shell: |
+    if ! lsof -Pi :55552 -sTCP:LISTEN -t >/dev/null ; then
+      nohup timeout 6h bash -c "while true; do nc -vvv -l -k 55552 ; done" >/dev/null 2>&1 &
+      sleep 2
+    fi
+ when:
+ - (groups["nwas_ers"] is defined and (groups["nwas_ers"]|length>0))
+ - (groups["nwas_ers"] is defined and inventory_hostname in groups["nwas_ers"])
+ - (ansible_product_name == 'Google Compute Engine') or (ansible_chassis_asset_tag == 'ibmcloud') or (ansible_chassis_vendor == 'Microsoft Corporation' and ansible_product_name == 'Virtual Machine')
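+
+# A verification sketch (an assumption, not part of the original flow): confirm a temporary
+# listener responds locally before relying on the Load Balancer Health Check probe
+# - name: Confirm temporary netcat listener responds on port 55550
+#   ansible.builtin.wait_for:
+#     host: "{{ ansible_default_ipv4.address }}"
+#     port: 55550
+#     timeout: 10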
diff --git a/roles/sap_vm_verify/README.md b/roles/sap_vm_verify/README.md
new file mode 100644
index 0000000..34f98e7
--- /dev/null
+++ b/roles/sap_vm_verify/README.md
@@ -0,0 +1,75 @@
+`WIP`
+
+# sap_vm_verify Ansible Role
+
+Ansible Role for verification of Virtual Machine state and readiness to perform SAP Software installation.
+
+This Ansible Role performs preflight checks to verify that the necessary storage and directories exist on the host, that network connectivity between hosts is available on the specific ports required, and that network connectivity to NFS and other services is available.
+
+
+## Functionality
+
+All hosts of SAP Software must fulfill various storage and network requirements; particularly network interconnectivity between hosts and other services (e.g. NFS). Prior to installation of SAP Software, verification checks can alert on blocked ports and fail early, before a partial/errored installation occurs.
+
+
+## Scope
+
+All hosts for SAP Software.
+
+
+## Requirements
+
+### Target hosts
+
+**OS Versions:**
+- Red Hat Enterprise Linux 8.2+
+- SUSE Linux Enterprise Server 15 SP3+
+
+### Execution/Controller host
+
+**Dependencies:**
+- OS Packages
+ - Python 3.9.7+ (i.e. CPython distribution)
+- Python Packages
+ - None
+- Ansible
+ - Ansible Core 2.12.0+
+ - Ansible Collections:
+ - None
+
+
+## Execution
+
+### Sample execution
+
+For further information, see the [sample Ansible Playbooks in `/playbooks`](../playbooks/).
+
+### Suggested execution sequence
+
+There are no Ansible Roles suggested to be executed prior to this Ansible Role.
+
+### Summary of execution flow
+
+- Detect Platform (or specify)
+- Execute storage availability and I/O checks
+- Execute network interconnectivity checks
+
+### Tags to control execution
+
+There are no tags used to control the execution of this Ansible Role.
+
+
+## License
+
+Apache 2.0
+
+
+## Authors
+
+TBD
+
+---
+
+## Ansible Role Input Variables
+
+Please first check the [/defaults parameters file](./defaults/main.yml).
diff --git a/roles/sap_vm_verify/defaults/main.yml b/roles/sap_vm_verify/defaults/main.yml
new file mode 100644
index 0000000..5c6e22b
--- /dev/null
+++ b/roles/sap_vm_verify/defaults/main.yml
@@ -0,0 +1,26 @@
+---
+
+sap_vm_verify_host_hana_primary: ""
+sap_vm_verify_host_hana_secondary: ""
+sap_vm_verify_host_anydb_primary: ""
+sap_vm_verify_host_anydb_secondary: ""
+sap_vm_verify_host_nwas_ascs: ""
+sap_vm_verify_host_nwas_ers: ""
+sap_vm_verify_host_nwas_pas: ""
+sap_vm_verify_host_nwas_aas: ""
+
+sap_vm_verify_instance_nr_db: "{{ sap_swpm_db_instance_nr | default('') }}"
+sap_vm_verify_instance_nr_nwas_abap_ascs: "{{ sap_swpm_ascs_instance_nr | default('') }}"
+sap_vm_verify_instance_nr_nwas_abap_ers: "{{ sap_swpm_ers_instance_nr | default('') }}"
+sap_vm_verify_instance_nr_nwas_abap_pas: "{{ sap_swpm_pas_instance_nr | default('') }}"
+sap_vm_verify_instance_nr_nwas_abap_aas: "{{ sap_swpm_aas_instance_nr | default('') }}"
+
+sap_vm_verify_vip_hana_primary: "{{ sap_ha_pacemaker_cluster_vip_hana_primary_ip_address | default('') }}"
+sap_vm_verify_vip_anydb_primary: ""
+sap_vm_verify_vip_nwas_abap_ascs: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ascs_ip_address | default('') }}"
+sap_vm_verify_vip_nwas_abap_ers: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_ers_ip_address | default('') }}"
+# sap_vm_verify_vip_nwas_abap_pas: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_pas_ip_address | default('') }}"
+# sap_vm_verify_vip_nwas_abap_aas: "{{ sap_ha_pacemaker_cluster_vip_nwas_abap_aas_ip_address | default('') }}"
+
+sap_vm_verify_nfs_mount_point: "{{ sap_vm_provision_nfs_mount_point | default('') }}"
+sap_vm_verify_nfs_mount_point_transport_dir: "{{ sap_vm_provision_nfs_mount_point_separate_sap_transport_dir | default('') }}"
diff --git a/roles/sap_vm_verify/meta/main.yml b/roles/sap_vm_verify/meta/main.yml
new file mode 100644
index 0000000..661a8bd
--- /dev/null
+++ b/roles/sap_vm_verify/meta/main.yml
@@ -0,0 +1,13 @@
+---
+galaxy_info:
+ namespace: community
+ author: Sean Freeman
+ description: SAP VM Verification Checks
+ company: IBM
+ license: Apache-2.0
+ min_ansible_version: 2.12
+ platforms:
+ - name: EL
+ versions: [8, 9]
+ galaxy_tags: ['sap', 'aws', 'gcp', 'msazure', 'ibmcloud', 'ibmpower', 'ovirt', 'kubevirt', 'vmware', 'rhel', 'redhat', 'sles', 'suse']
+dependencies: []
diff --git a/roles/sap_vm_verify/meta/runtime.yml b/roles/sap_vm_verify/meta/runtime.yml
new file mode 100644
index 0000000..c2ea658
--- /dev/null
+++ b/roles/sap_vm_verify/meta/runtime.yml
@@ -0,0 +1,2 @@
+---
+requires_ansible: '>=2.12.0'
diff --git a/roles/sap_vm_verify/tasks/check_network_interconnectivity.yml b/roles/sap_vm_verify/tasks/check_network_interconnectivity.yml
new file mode 100644
index 0000000..5038d09
--- /dev/null
+++ b/roles/sap_vm_verify/tasks/check_network_interconnectivity.yml
@@ -0,0 +1,252 @@
+---
+
+# Netcat (nmap-netcat): use `nc --verbose --idle-timeout "2" $IP $PORT`, which will display in stdout...
+# 'Connected to' followed by 'Idle timeout expired' or 'Connection reset by peer', when the host is accessible/responsive and the port is listening
+# 'Idle timeout expired' only, when the host is NOT accessible/responsive
+# 'Connection refused' only, when the host is accessible/responsive and the port is NOT listening
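+
+# A sketch of the nc probe described above as an Ansible task (an assumption; the tasks in
+# this file use ansible.builtin.wait_for instead, which requires no extra packages)
+# - name: Probe SAP HANA port with netcat and evaluate stdout
+#   ansible.builtin.shell: nc --verbose --idle-timeout "2" {{ sap_vm_verify_host_hana_primary }} 3{{ sap_vm_verify_instance_nr_db }}15 2>&1 || true
+#   register: nc_probe
+#   changed_when: false
+#   failed_when: "'Connected to' not in nc_probe.stdout"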
+
+- name: Check interconnectivity between hosts - SAP HANA - Primary
+ ansible.builtin.wait_for:
+ host: "{{ sap_vm_verify_host_hana_primary }}"
+ port: "{{ item }}"
+ delay: 10
+ sleep: 10
+ timeout: 600
+ loop:
+ - 5{{ sap_vm_verify_instance_nr_db }}13
+ - 5{{ sap_vm_verify_instance_nr_db }}14
+ - 3{{ sap_vm_verify_instance_nr_db }}06
+ - 3{{ sap_vm_verify_instance_nr_db }}13
+ - 3{{ sap_vm_verify_instance_nr_db }}15
+ - 80{{ sap_vm_verify_instance_nr_db }}
+ - 43{{ sap_vm_verify_instance_nr_db }}
+
+- name: Check interconnectivity between hosts - SAP HANA - Secondary
+ ansible.builtin.wait_for:
+ host: "{{ sap_vm_verify_host_hana_secondary }}"
+ port: "{{ item }}"
+ delay: 10
+ sleep: 10
+ timeout: 600
+ loop:
+ - 5{{ sap_vm_verify_instance_nr_db }}13
+ - 5{{ sap_vm_verify_instance_nr_db }}14
+ - 3{{ sap_vm_verify_instance_nr_db }}06
+ - 3{{ sap_vm_verify_instance_nr_db }}13
+ - 3{{ sap_vm_verify_instance_nr_db }}15
+ - 80{{ sap_vm_verify_instance_nr_db }}
+ - 43{{ sap_vm_verify_instance_nr_db }}
+
+- name: Check interconnectivity between hosts - SAP HANA System Replication - Primary
+ ansible.builtin.wait_for:
+ host: "{{ sap_vm_verify_host_hana_primary }}"
+ port: "{{ item }}"
+ delay: 10
+ sleep: 10
+ timeout: 600
+ loop:
+ - 4{{ sap_vm_verify_instance_nr_db }}01
+ - 4{{ sap_vm_verify_instance_nr_db }}02
+ - 4{{ sap_vm_verify_instance_nr_db }}03
+ - 4{{ sap_vm_verify_instance_nr_db }}06
+ - 4{{ sap_vm_verify_instance_nr_db }}07
+    - 4{{ sap_vm_verify_instance_nr_db }}40 - 4{{ sap_vm_verify_instance_nr_db }}97  # port range; must be expanded to individual ports, wait_for accepts a single port
+    - 2224
+    - 3121
+    - 5104 - 5412  # port range; must be expanded to individual ports
+
+- name: Check interconnectivity between hosts - SAP HANA System Replication - Secondary
+ ansible.builtin.wait_for:
+ host: "{{ sap_vm_verify_host_hana_secondary }}"
+ port: "{{ item }}"
+ delay: 10
+ sleep: 10
+ timeout: 600
+ loop:
+ - 4{{ sap_vm_verify_instance_nr_db }}01
+ - 4{{ sap_vm_verify_instance_nr_db }}02
+ - 4{{ sap_vm_verify_instance_nr_db }}03
+ - 4{{ sap_vm_verify_instance_nr_db }}06
+ - 4{{ sap_vm_verify_instance_nr_db }}07
+    - 4{{ sap_vm_verify_instance_nr_db }}40 - 4{{ sap_vm_verify_instance_nr_db }}97  # port range; must be expanded to individual ports, see the sketch after this task
+    - 2224
+    - 3121
+    - 5104 - 5412  # port range; must be expanded to individual ports
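+
+# A sketch (an assumption) of expanding the SAP HANA System Replication port range above
+# with the Jinja2 range filter, since ansible.builtin.wait_for accepts only a single port
+# - name: Check interconnectivity between hosts - SAP HANA System Replication port range - Secondary
+#   ansible.builtin.wait_for:
+#     host: "{{ sap_vm_verify_host_hana_secondary }}"
+#     port: "{{ item }}"
+#     timeout: 600
+#   loop: "{{ range(('4' ~ sap_vm_verify_instance_nr_db ~ '40') | int, (('4' ~ sap_vm_verify_instance_nr_db ~ '97') | int) + 1) | list }}"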
+
+- name: Check interconnectivity between hosts - SAP NetWeaver ABAP ASCS
+ ansible.builtin.wait_for:
+ host: "{{ sap_vm_verify_host_nwas_ascs }}"
+ port: "{{ item }}"
+ delay: 10
+ sleep: 10
+ timeout: 600
+ loop:
+ - 32{{ sap_vm_verify_instance_nr_nwas_abap_ascs }}
+ - 36{{ sap_vm_verify_instance_nr_nwas_abap_ascs }}
+ - 39{{ sap_vm_verify_instance_nr_nwas_abap_ascs }}
+ - 5{{ sap_vm_verify_instance_nr_nwas_abap_ascs }}13
+ - 5{{ sap_vm_verify_instance_nr_nwas_abap_ascs }}14
+
+- name: Check interconnectivity between hosts - SAP NetWeaver ABAP ERS
+ ansible.builtin.wait_for:
+ host: "{{ sap_vm_verify_host_nwas_ers }}"
+ port: "{{ item }}"
+ delay: 10
+ sleep: 10
+ timeout: 600
+ loop:
+ - 32{{ sap_vm_verify_instance_nr_nwas_abap_ers }}
+ - 36{{ sap_vm_verify_instance_nr_nwas_abap_ers }}
+ - 39{{ sap_vm_verify_instance_nr_nwas_abap_ers }}
+ - 5{{ sap_vm_verify_instance_nr_nwas_abap_ers }}13
+ - 5{{ sap_vm_verify_instance_nr_nwas_abap_ers }}14
+
+- name: Check interconnectivity between hosts - SAP NetWeaver ABAP PAS
+ ansible.builtin.wait_for:
+ host: "{{ sap_vm_verify_host_nwas_pas }}"
+ port: "{{ item }}"
+ delay: 10
+ sleep: 10
+ timeout: 600
+ loop:
+ - 32{{ sap_vm_verify_instance_nr_nwas_abap_pas }}
+ - 33{{ sap_vm_verify_instance_nr_nwas_abap_pas }}
+ - 48{{ sap_vm_verify_instance_nr_nwas_abap_pas }}
+ - 5{{ sap_vm_verify_instance_nr_nwas_abap_pas }}13
+ - 5{{ sap_vm_verify_instance_nr_nwas_abap_pas }}14
+ - 80{{ sap_vm_verify_instance_nr_nwas_abap_pas }}
+ - 443{{ sap_vm_verify_instance_nr_nwas_abap_pas }}
+
+- name: Check interconnectivity between hosts - SAP NetWeaver ABAP AAS
+ ansible.builtin.wait_for:
+ host: "{{ sap_vm_verify_host_nwas_aas }}"
+ port: "{{ item }}"
+ delay: 10
+ sleep: 10
+ timeout: 600
+ loop:
+ - 32{{ sap_vm_verify_instance_nr_nwas_abap_aas }}
+ - 33{{ sap_vm_verify_instance_nr_nwas_abap_aas }}
+ - 48{{ sap_vm_verify_instance_nr_nwas_abap_aas }}
+ - 5{{ sap_vm_verify_instance_nr_nwas_abap_aas }}13
+ - 5{{ sap_vm_verify_instance_nr_nwas_abap_aas }}14
+ - 80{{ sap_vm_verify_instance_nr_nwas_abap_aas }}
+ - 443{{ sap_vm_verify_instance_nr_nwas_abap_aas }}
+
+- name: Check interconnectivity between hosts - SAP Host Agent
+ ansible.builtin.wait_for:
+ host: "{{ ALL_HOSTS_HERE }}"
+ port: "{{ item }}"
+ delay: 10
+ sleep: 10
+ timeout: 600
+ loop:
+ - 1128
+ - 1129
+
+
+- name: Check interconnectivity between hosts and Virtual IP {{ sap_vm_verify_vip_hana_primary }} for SAP HANA
+ ansible.builtin.wait_for:
+ host: "{{ sap_vm_verify_vip_hana_primary }}"
+ port: "{{ item }}"
+ delay: 10
+ sleep: 10
+ timeout: 600
+ loop:
+ - 5{{ sap_vm_verify_instance_nr_db }}13
+ - 5{{ sap_vm_verify_instance_nr_db }}14
+ - 3{{ sap_vm_verify_instance_nr_db }}06
+ - 3{{ sap_vm_verify_instance_nr_db }}13
+ - 3{{ sap_vm_verify_instance_nr_db }}15
+ - 80{{ sap_vm_verify_instance_nr_db }}
+ - 43{{ sap_vm_verify_instance_nr_db }}
+
+- name: Check interconnectivity between hosts and Virtual IP {{ sap_vm_verify_vip_hana_primary }} for SAP HANA System Replication
+ ansible.builtin.wait_for:
+ host: "{{ sap_vm_verify_vip_hana_primary }}"
+ port: "{{ item }}"
+ delay: 10
+ sleep: 10
+ timeout: 600
+ loop:
+ - 4{{ sap_vm_verify_instance_nr_db }}01
+ - 4{{ sap_vm_verify_instance_nr_db }}02
+ - 4{{ sap_vm_verify_instance_nr_db }}03
+ - 4{{ sap_vm_verify_instance_nr_db }}06
+ - 4{{ sap_vm_verify_instance_nr_db }}07
+    - 4{{ sap_vm_verify_instance_nr_db }}40 - 4{{ sap_vm_verify_instance_nr_db }}97  # port range; must be expanded to individual ports, wait_for accepts a single port
+    - 2224
+    - 3121
+    - 5104 - 5412  # port range; must be expanded to individual ports
+
+
+- name: Check interconnectivity between hosts and Virtual IP {{ sap_vm_verify_vip_nwas_abap_ascs }} for SAP NetWeaver ABAP ASCS
+ ansible.builtin.wait_for:
+ host: "{{ sap_vm_verify_vip_nwas_abap_ascs }}"
+ port: "{{ item }}"
+ delay: 10
+ sleep: 10
+ timeout: 600
+ loop:
+ - 32{{ sap_vm_verify_instance_nr_nwas_abap_ascs }}
+ - 36{{ sap_vm_verify_instance_nr_nwas_abap_ascs }}
+ - 39{{ sap_vm_verify_instance_nr_nwas_abap_ascs }}
+ - 5{{ sap_vm_verify_instance_nr_nwas_abap_ascs }}13
+ - 5{{ sap_vm_verify_instance_nr_nwas_abap_ascs }}14
+
+- name: Check interconnectivity between hosts and Virtual IP {{ sap_vm_verify_vip_nwas_abap_ers }} for SAP NetWeaver ABAP ERS
+ ansible.builtin.wait_for:
+ host: "{{ sap_vm_verify_vip_nwas_abap_ers }}"
+ port: "{{ item }}"
+ delay: 10
+ sleep: 10
+ timeout: 600
+ loop:
+ - 32{{ sap_vm_verify_instance_nr_nwas_abap_ers }}
+ - 36{{ sap_vm_verify_instance_nr_nwas_abap_ers }}
+ - 39{{ sap_vm_verify_instance_nr_nwas_abap_ers }}
+ - 5{{ sap_vm_verify_instance_nr_nwas_abap_ers }}13
+ - 5{{ sap_vm_verify_instance_nr_nwas_abap_ers }}14
+
+# - name: Check interconnectivity between hosts and Virtual IP {{ sap_vm_verify_vip_nwas_abap_pas }} for SAP NetWeaver ABAP PAS
+# ansible.builtin.wait_for:
+# host: "{{ sap_vm_verify_vip_nwas_abap_pas }}"
+# port: "{{ item }}"
+# delay: 10
+# sleep: 10
+# timeout: 600
+# loop:
+# - 32{{ sap_vm_verify_instance_nr_nwas_abap_pas }}
+# - 33{{ sap_vm_verify_instance_nr_nwas_abap_pas }}
+# - 48{{ sap_vm_verify_instance_nr_nwas_abap_pas }}
+# - 5{{ sap_vm_verify_instance_nr_nwas_abap_pas }}13
+# - 5{{ sap_vm_verify_instance_nr_nwas_abap_pas }}14
+# - 80{{ sap_vm_verify_instance_nr_nwas_abap_pas }}
+# - 443{{ sap_vm_verify_instance_nr_nwas_abap_pas }}
+
+# - name: Check interconnectivity between hosts and Virtual IP {{ sap_vm_verify_vip_nwas_abap_aas }} for SAP NetWeaver ABAP AAS
+# ansible.builtin.wait_for:
+# host: "{{ sap_vm_verify_vip_nwas_abap_aas }}"
+# port: "{{ item }}"
+# delay: 10
+# sleep: 10
+# timeout: 600
+# loop:
+# - 32{{ sap_vm_verify_instance_nr_nwas_abap_aas }}
+# - 33{{ sap_vm_verify_instance_nr_nwas_abap_aas }}
+# - 48{{ sap_vm_verify_instance_nr_nwas_abap_aas }}
+# - 5{{ sap_vm_verify_instance_nr_nwas_abap_aas }}13
+# - 5{{ sap_vm_verify_instance_nr_nwas_abap_aas }}14
+# - 80{{ sap_vm_verify_instance_nr_nwas_abap_aas }}
+# - 443{{ sap_vm_verify_instance_nr_nwas_abap_aas }}
+
+- name: Check interconnectivity between hosts and Virtual IPs - SAP Host Agent
+ ansible.builtin.wait_for:
+ host: "{{ ALL_VIRTUAL_IPS_HERE }}"
+ port: "{{ item }}"
+ delay: 10
+ sleep: 10
+ timeout: 600
+ loop:
+ - 1128
+ - 1129
diff --git a/roles/sap_vm_verify/tasks/check_network_performance.yml b/roles/sap_vm_verify/tasks/check_network_performance.yml
new file mode 100644
index 0000000..a199e8d
--- /dev/null
+++ b/roles/sap_vm_verify/tasks/check_network_performance.yml
@@ -0,0 +1,3 @@
+---
+
+# Add network performance checks (iperf)
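+
+# A minimal sketch (an assumption: iperf3 is available from the OS package repositories and
+# an iperf3 server has been started on the peer host) of a throughput check between hosts
+# - name: Install iperf3
+#   ansible.builtin.package:
+#     name: iperf3
+#     state: present
+#
+# - name: Run iperf3 client against SAP HANA Secondary for 10 seconds
+#   ansible.builtin.command: iperf3 --client {{ sap_vm_verify_host_hana_secondary }} --time 10 --json
+#   register: iperf3_result
+#   changed_when: false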
diff --git a/roles/sap_vm_verify/tasks/check_storage_generic.yml b/roles/sap_vm_verify/tasks/check_storage_generic.yml
new file mode 100644
index 0000000..a4cac07
--- /dev/null
+++ b/roles/sap_vm_verify/tasks/check_storage_generic.yml
@@ -0,0 +1,19 @@
+---
+
+# - name: Check directories exist
+# /hana/data
+# /hana/log
+# /hana/shared
+# /usr/sap
+# /sapmnt
+
+# - name: Check directories have minimum required capacity
+# /hana/data
+# /hana/log
+# /hana/shared
+# /usr/sap
+# /sapmnt
+
+# - name: Check swap (partition or file)
+# /swap
+# swapfile
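+
+# A sketch (an assumption) of the directory existence check outlined above
+# - name: Collect status of SAP directories
+#   ansible.builtin.stat:
+#     path: "{{ item }}"
+#   register: sap_vm_verify_dirs
+#   loop:
+#     - /hana/data
+#     - /hana/log
+#     - /hana/shared
+#     - /usr/sap
+#     - /sapmnt
+#
+# - name: Assert SAP directories exist
+#   ansible.builtin.assert:
+#     that: item.stat.exists and item.stat.isdir
+#     fail_msg: "Missing directory {{ item.item }}"
+#   loop: "{{ sap_vm_verify_dirs.results }}"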
diff --git a/roles/sap_vm_verify/tasks/check_storage_nfs.yml b/roles/sap_vm_verify/tasks/check_storage_nfs.yml
new file mode 100644
index 0000000..30b6af4
--- /dev/null
+++ b/roles/sap_vm_verify/tasks/check_storage_nfs.yml
@@ -0,0 +1,24 @@
+---
+
+# HANA Scale-Out
+
+# NWAS
+
+
+# - name: Check each host can access the NFS, via netcat
+# ansible.builtin.shell: |
+# sap_vm_verify_nfs_mount_point='{{ sap_vm_verify_nfs_mount_point }}'
+# nc $sap_vm_verify_nfs_mount_point 2049
+# loop: "{{ [ groups['hana_primary'] , groups['nwas_ascs'] , groups['nwas_pas'] , groups['nwas_aas'] ] | flatten }}"
+# loop_control:
+# loop_var: host_node
+# register: shell_hostname
+
+# - name: Check SAP Mount Directory (/sapmnt) is NFS on each SAP NetWeaver host
+# ansible_facts.ansible_mounts using {{ ansible_facts['nodename'] }}
+
+# - name: Ensure SAP Common Transport Directory symlink (/usr/sap/trans > /sapmnt/trans) exists
+# assert.stat.islnk
+
+# - name: Check SAP Common Transport Directory is NFS on each SAP NetWeaver host
+# ansible_facts.ansible_mounts using {{ ansible_facts['nodename'] }}
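+
+# A sketch (an assumption) of the NFS reachability check outlined above, using
+# ansible.builtin.wait_for on the NFS port instead of netcat; the NFS host is
+# extracted from a mount point given in 'host:/path' form
+# - name: Check host can reach the NFS server on port 2049
+#   ansible.builtin.wait_for:
+#     host: "{{ sap_vm_verify_nfs_mount_point | regex_replace(':.*', '') }}"
+#     port: 2049
+#     timeout: 60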
diff --git a/roles/sap_vm_verify/tasks/check_storage_performance.yml b/roles/sap_vm_verify/tasks/check_storage_performance.yml
new file mode 100644
index 0000000..4a8f337
--- /dev/null
+++ b/roles/sap_vm_verify/tasks/check_storage_performance.yml
@@ -0,0 +1,3 @@
+---
+
+# Add storage performance (fio) checks to simulate HCMT checks
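+
+# A minimal sketch (an assumption: the fio package is available from the OS package
+# repositories; parameters are illustrative) of a sequential write throughput check
+# similar to an HCMT Data Volume test
+# - name: Install fio
+#   ansible.builtin.package:
+#     name: fio
+#     state: present
+#
+# - name: Run fio sequential write test against /hana/data
+#   ansible.builtin.command: >-
+#     fio --name=hana_data_seq_write --directory=/hana/data --rw=write
+#     --bs=1M --size=1G --numjobs=1 --direct=1 --output-format=json
+#   register: fio_result
+#   changed_when: false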
diff --git a/roles/sap_vm_verify/tasks/main.yml b/roles/sap_vm_verify/tasks/main.yml
new file mode 100644
index 0000000..0fa7c06
--- /dev/null
+++ b/roles/sap_vm_verify/tasks/main.yml
@@ -0,0 +1,22 @@
+---
+
+- name: Begin Virtual Machine Verification preflight checks before SAP Software installation
+ block:
+
+ - name: Execute Storage Checks - Generic
+ ansible.builtin.include_tasks: "check_storage_generic.yml"
+
+ - name: Execute Storage Checks - NFS
+ ansible.builtin.include_tasks: "check_storage_nfs.yml"
+
+ - name: Execute Network Checks - Interconnectivity
+ ansible.builtin.include_tasks: "check_network_interconnectivity.yml"
+
+ - name: Execute Storage Checks - Performance
+ ansible.builtin.include_tasks: "check_storage_performance.yml"
+
+ - name: Execute Network Checks - Performance
+ ansible.builtin.include_tasks: "check_network_performance.yml"
+
+ # - name: Execute Infrastructure Platform Checks
+ # ansible.builtin.include_tasks: "platform/{{ sap_vm_verify_platform }}/main.yml"
diff --git a/roles/sap_vm_verify/tasks/platform/aws_ec2_vs/.gitkeep b/roles/sap_vm_verify/tasks/platform/aws_ec2_vs/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_verify/tasks/platform/gcp_ce_vm/.gitkeep b/roles/sap_vm_verify/tasks/platform/gcp_ce_vm/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_verify/tasks/platform/ibmcloud_powervs/.gitkeep b/roles/sap_vm_verify/tasks/platform/ibmcloud_powervs/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_verify/tasks/platform/ibmcloud_vs/.gitkeep b/roles/sap_vm_verify/tasks/platform/ibmcloud_vs/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_verify/tasks/platform/ibmpowervm_vm/.gitkeep b/roles/sap_vm_verify/tasks/platform/ibmpowervm_vm/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_verify/tasks/platform/kubevirt_vm/.gitkeep b/roles/sap_vm_verify/tasks/platform/kubevirt_vm/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_verify/tasks/platform/msazure_vm/.gitkeep b/roles/sap_vm_verify/tasks/platform/msazure_vm/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_verify/tasks/platform/ovirt_vm/.gitkeep b/roles/sap_vm_verify/tasks/platform/ovirt_vm/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/roles/sap_vm_verify/tasks/platform_checks_temp.yml b/roles/sap_vm_verify/tasks/platform_checks_temp.yml
new file mode 100644
index 0000000..4c4e5d0
--- /dev/null
+++ b/roles/sap_vm_verify/tasks/platform_checks_temp.yml
@@ -0,0 +1,11 @@
+---
+
+####
+# Google Cloud
+####
+
+# Add check that Google Cloud VPC Firewall rules do not block Google Cloud Backend Service connections, otherwise Health Status will be UNHEALTHY
+
+# Add check that the Google Cloud NAT Gateway is configured with sufficient parallelism
+# Use "160 as the minimum number of ports per VM" under Advanced Configurations in the Google Cloud NAT Gateway instance
+# Otherwise "Certain operating system updates run concurrent processes that might exceed the Cloud NAT default for the number of ports, which can result in activation or download errors."