diff --git a/conf/deployment/vsphere/upi_1az_rhcos_multus_public_cluster_vsan_3m_3w.yaml b/conf/deployment/vsphere/upi_1az_rhcos_multus_public_cluster_vsan_3m_3w.yaml
new file mode 100644
index 00000000000..697c9dc851e
--- /dev/null
+++ b/conf/deployment/vsphere/upi_1az_rhcos_multus_public_cluster_vsan_3m_3w.yaml
@@ -0,0 +1,24 @@
+---
+# This config is supposed to work on most of the DCs we have.
+DEPLOYMENT:
+  allow_lower_instance_requirements: false
+ENV_DATA:
+  platform: "vsphere"
+  deployment_type: "upi"
+  worker_replicas: 3
+  master_replicas: 3
+  worker_num_cpus: "16"
+  master_num_cpus: "4"
+  master_memory: "16384"
+  compute_memory: "65536"
+  fio_storageutilization_min_mbps: 10.0
+  is_multus_enabled: true
+  multus_public_net_interface: "ens224"
+  multus_cluster_net_interface: "ens224"
+  multus_create_public_net: true
+  multus_create_cluster_net: true
+  multus_public_net_namespace: "default"
+
+REPORTING:
+  polarion:
+    deployment_id: "OCS-6302"
diff --git a/ocs_ci/deployment/vmware.py b/ocs_ci/deployment/vmware.py
index b2ca2800435..087d7930beb 100644
--- a/ocs_ci/deployment/vmware.py
+++ b/ocs_ci/deployment/vmware.py
@@ -1075,7 +1075,6 @@ def deploy(self, log_cli_level="DEBUG"):
             os.chdir(self.previous_dir)
 
         if not self.sno:
-
             # Update kubeconfig with proxy-url (if client_http_proxy
             # configured) to redirect client access through proxy server.
             update_kubeconfig_with_proxy_url_for_client(self.kubeconfig)
@@ -1157,6 +1156,15 @@ def deploy(self, log_cli_level="DEBUG"):
         # Approving CSRs here in-case if any exists
         approve_pending_csr()
 
+        if config.ENV_DATA["is_multus_enabled"]:
+
+            vsphere = VSPHERE(
+                config.ENV_DATA["vsphere_server"],
+                config.ENV_DATA["vsphere_user"],
+                config.ENV_DATA["vsphere_password"],
+            )
+
+            vsphere.add_interface_to_compute_vms()
         self.test_cluster()
 
     def deploy_ocp(self, log_cli_level="DEBUG"):
diff --git a/ocs_ci/helpers/helpers.py b/ocs_ci/helpers/helpers.py
index e3fca3a7176..460a03e7cb1 100644
--- a/ocs_ci/helpers/helpers.py
+++ b/ocs_ci/helpers/helpers.py
@@ -17,6 +17,7 @@
 import inspect
 import stat
 import platform
+import ipaddress
 from concurrent.futures import ThreadPoolExecutor
 from itertools import cycle
 from subprocess import PIPE, run
@@ -4949,34 +4950,61 @@ def configure_node_network_configuration_policy_on_all_worker_nodes():
     # This function require changes for compact mode
     logger.info("Configure NodeNetworkConfigurationPolicy on all worker nodes")
     worker_node_names = get_worker_nodes()
+    interface_num = 0
     for worker_node_name in worker_node_names:
-        worker_network_configuration = config.ENV_DATA["baremetal"]["servers"][
-            worker_node_name
-        ]
         node_network_configuration_policy = templating.load_yaml(
             constants.NODE_NETWORK_CONFIGURATION_POLICY
         )
-        node_network_configuration_policy["spec"]["nodeSelector"][
-            "kubernetes.io/hostname"
-        ] = worker_node_name
-        node_network_configuration_policy["metadata"]["name"] = (
-            worker_network_configuration["node_network_configuration_policy_name"]
-        )
-        node_network_configuration_policy["spec"]["desiredState"]["interfaces"][0][
-            "ipv4"
-        ]["address"][0]["ip"] = worker_network_configuration[
-            "node_network_configuration_policy_ip"
-        ]
-        node_network_configuration_policy["spec"]["desiredState"]["interfaces"][0][
-            "ipv4"
-        ]["address"][0]["prefix-length"] = worker_network_configuration[
-            "node_network_configuration_policy_prefix_length"
-        ]
-        node_network_configuration_policy["spec"]["desiredState"]["routes"]["config"][
-            0
-        ]["destination"] = worker_network_configuration[
"node_network_configuration_policy_destination_route" - ] + + if config.ENV_DATA["platform"] == constants.BAREMETAL_PLATFORM: + worker_network_configuration = config.ENV_DATA["baremetal"]["servers"][ + worker_node_name + ] + node_network_configuration_policy["spec"]["nodeSelector"][ + "kubernetes.io/hostname" + ] = worker_node_name + node_network_configuration_policy["metadata"]["name"] = ( + worker_network_configuration["node_network_configuration_policy_name"] + ) + node_network_configuration_policy["spec"]["desiredState"]["interfaces"][0][ + "ipv4" + ]["address"][0]["ip"] = worker_network_configuration[ + "node_network_configuration_policy_ip" + ] + node_network_configuration_policy["spec"]["desiredState"]["interfaces"][0][ + "ipv4" + ]["address"][0]["prefix-length"] = worker_network_configuration[ + "node_network_configuration_policy_prefix_length" + ] + node_network_configuration_policy["spec"]["desiredState"]["routes"][ + "config" + ][0]["destination"] = worker_network_configuration[ + "node_network_configuration_policy_destination_route" + ] + elif config.ENV_DATA["platform"] == constants.VSPHERE_PLATFORM: + + node_network_configuration_policy["spec"]["nodeSelector"][ + "kubernetes.io/hostname" + ] = worker_node_name + + node_network_configuration_policy["metadata"][ + "name" + ] = f"ceph-public-net-shim-{worker_node_name}" + shim_default_ip = node_network_configuration_policy["spec"]["desiredState"][ + "interfaces" + ][0]["ipv4"]["address"][0]["ip"] + + shim_ip = str(ipaddress.ip_address(shim_default_ip) + interface_num) + interface_num += 1 + + node_network_configuration_policy["spec"]["desiredState"]["interfaces"][0][ + "ipv4" + ]["address"][0]["ip"] = shim_ip + + node_network_configuration_policy["spec"]["desiredState"]["interfaces"][0][ + "mac-vlan" + ]["base-iface"] = constants.VSPHERE_MULTUS_INTERFACE + public_net_yaml = tempfile.NamedTemporaryFile( mode="w+", prefix="multus_public", delete=False ) diff --git a/ocs_ci/ocs/constants.py b/ocs_ci/ocs/constants.py index 2c14f51b679..5b4534364c9 100644 --- a/ocs_ci/ocs/constants.py +++ b/ocs_ci/ocs/constants.py @@ -1046,7 +1046,7 @@ TEMPLATE_DEPLOYMENT_DIR, "node_network_configuration_policy.yaml" ) NETWORK_ATTACHEMENT_DEFINITION = "network-attachment-definitions.k8s.cni.cncf.io" - +VSPHERE_MULTUS_INTERFACE = "ens224" OPERATOR_SOURCE_NAME = "ocs-operatorsource" diff --git a/ocs_ci/templates/ocs-deployment/node_network_configuration_policy.yaml b/ocs_ci/templates/ocs-deployment/node_network_configuration_policy.yaml index d5b804799ef..a95b5754f9f 100644 --- a/ocs_ci/templates/ocs-deployment/node_network_configuration_policy.yaml +++ b/ocs_ci/templates/ocs-deployment/node_network_configuration_policy.yaml @@ -1,29 +1,31 @@ apiVersion: nmstate.io/v1 kind: NodeNetworkConfigurationPolicy metadata: - name: ceph-public-net-shim-worker-node - namespace: openshift-storage + name: ceph-public-net-shim-worker-node + namespace: openshift-storage spec: - nodeSelector: - node-role.kubernetes.io/worker: "" - kubernetes.io/hostname: worker-node - desiredState: - interfaces: - - name: odf-pub-shim - description: Shim interface used to connect host to OpenShift Data Foundation public Multus network - type: mac-vlan - state: up - mac-vlan: - base-iface: enp1s0f1 - mode: bridge - promiscuous: true - ipv4: - enabled: true - dhcp: false - address: - - ip: 192.168.252.1 # STATIC IP FOR worker node - prefix-length: 24 - routes: - config: - - destination: 192.168.20.0/24 - next-hop-interface: odf-pub-shim + nodeSelector: + node-role.kubernetes.io/worker: 
"" + kubernetes.io/hostname: worker-node + desiredState: + interfaces: + - name: odf-pub-shim + description: Shim interface used to connect host to OpenShift Data Foundation public Multus network + type: mac-vlan + state: up + mac-vlan: + base-iface: enp1s0f1 + mode: bridge + promiscuous: true + ipv4: + enabled: true + dhcp: false + address: + - ip: 192.168.252.1 # STATIC IP FOR worker node + prefix-length: 24 + routes: + config: + - destination: 192.168.20.0/24 + next-hop-interface: odf-pub-shim + - destination: 192.168.30.0/24 + next-hop-interface: odf-pub-shim diff --git a/ocs_ci/utility/vsphere.py b/ocs_ci/utility/vsphere.py index d2a3b2de3ef..a309b6e7c8d 100644 --- a/ocs_ci/utility/vsphere.py +++ b/ocs_ci/utility/vsphere.py @@ -29,6 +29,7 @@ VM_DEFAULT_NETWORK, VM_DEFAULT_NETWORK_ADAPTER, ) +from ocs_ci.framework import config from ocs_ci.utility.utils import TimeoutSampler logger = logging.getLogger(__name__) @@ -1742,3 +1743,61 @@ def get_volume_path(self, volume_id, datastore_name, datacenter_name): volume_path = vstorage_object.config.backing.filePath logger.debug(f"File path for volume {volume_id} is `{volume_path}`") return volume_path + + def add_interface_to_compute_vms( + self, network_name="VM Network", adapter_type="vmxnet3" + ): + """ + Add idditional interface to VMs in pool run + + Args: + network_name (str): Network to add the interface, default VM Network + adapter_type (str): Type of network adapter, default vmxnet3 + + """ + pool = config.ENV_DATA["cluster_name"] + dc = config.ENV_DATA["vsphere_datacenter"] + cluster = config.ENV_DATA["vsphere_cluster"] + vms = self.get_compute_vms_in_pool(name=pool, dc=dc, cluster=cluster) + if not vms: + raise Exception(f"Compute VMs in '{pool}' not found.") + content = self.get_content + container = content.viewManager.CreateContainerView( + content.rootFolder, [vim.Network], True + ) + for conf in container.view: + if conf.name == network_name: + network = conf + break + if not network: + raise Exception(f"Network '{network_name}' not found.") + for vm in vms: + device_spec = vim.vm.device.VirtualDeviceSpec() + device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add + if adapter_type == "vmxnet3": + nic = vim.vm.device.VirtualVmxnet3() + # Set the network backing + nic.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo() + nic.backing.network = network + nic.backing.deviceName = network_name + # Specify the adapter type + nic.key = -100 # Temporary key; vSphere assigns a unique key + nic.deviceInfo = vim.Description() + nic.deviceInfo.summary = ( + f"{adapter_type} adapter connected to {network_name}" + ) + device_spec.device = nic + # Create a VM configuration spec + spec = vim.vm.ConfigSpec() + spec.deviceChange = [device_spec] + # Reconfigure the VM + task = vm.ReconfigVM_Task(spec=spec) + logger.info( + f"Adding {adapter_type} adapter to VM '{vm.name}' on network '{network_name}'..." + ) + result = WaitForTask(task) + if result is None: + raise Exception( + f"Task for configuring network for {vm.name} did not complete successfully." + ) + logger.info(f"Network adapter added to {vm.name} successfully.")