From aeeee1030f874670563852f02bcb244ea11513f8 Mon Sep 17 00:00:00 2001 From: Bill Peck Date: Fri, 4 Oct 2024 11:50:53 -0400 Subject: [PATCH] Include ARC hosts in inventory By default ARC hosts will not show up in inventory, but you can use the following inventory config to get them. plugin: azure.azcollection.azure_rm include_arc_resource_groups: ['*'] Use hostvar_expressions to modify the default ansible ssh config hostvar_expressions: ansible_host: "resource_group + '-' + name if 'Microsoft.HybridCompute/machines' == resource_type else (public_dns_hostnames + public_ipv4_address) | first" ansible_ssh_common_args: "'-F /tmp/' + resource_group + '-' + name + '/ssh_config' if 'Microsoft.HybridCompute/machines' == resource_type" Use keyed_groups to organize them or tags keyed_groups: - prefix: "type" key: resource_type trailing_separator: false Use the azure_rm_arcssh action plugin to configure the dynamic inventory hosts with ssh proxy settings: - name: Configure ARC SSH Proxy hosts: localhost connection: local tasks: - name: Setup Proxy azure.azcollection.azure_rm_arcssh: inventory_hostname: "{{ item }}" ansible_host: "{{ hostvars[item].ansible_host }}" local_user: admin resource_group: "{{ hostvars[item].resource_group }}" resource_type: "{{ hostvars[item].resource_type }}" private_key_file: "~/.ssh/id_rsa" ssh_config_file: "/tmp/{{ hostvars[item].resource_group }}-{{ item }}/ssh_config" ssh_relay_file: "/tmp/{{ hostvars[item].resource_group }}-{{ item }}/relay_info" ssh_proxy_folder: "~/.clientsshproxy" loop: "{{ groups['type_Microsoft_HybridCompute_machines'] }}" - name: Ping ARC Hosts hosts: type_Microsoft_HybridCompute_machines tasks: - name: Ping ansible.builtin.ping: --- plugins/action/azure_rm_arcssh.py | 144 ++++++++ plugins/doc_fragments/azure_rm.py | 4 + plugins/inventory/azure_rm.py | 89 +++++ plugins/modules/azure_rm_arcssh.py | 117 +++++++ plugins/plugin_utils/connectivity_utils.py | 373 +++++++++++++++++++++ plugins/plugin_utils/constants.py | 48 +++ 
''' run the azure_rm_arcssh action module: configure the SSH proxy for an ARC host '''
None: + task_vars = dict() + + result = super(ActionModule, self).run(tmp, task_vars) + del tmp # tmp no longer has any effect + + merged_arg_spec = dict() + merged_arg_spec.update(AZURE_COMMON_ARGS) + merged_arg_spec.update( + { + 'inventory_hostname': {'type': 'str'}, + 'ansible_host': {'type': 'str'}, + 'resource_group': {'type': 'str'}, + 'resource_type': {'type': 'str'}, + 'private_key_file': {'type': 'str'}, + 'local_user': {'type': 'str'}, + 'port': {'type': 'int'}, + 'ssh_config_file': {'type': 'str'}, + 'ssh_relay_file': {'type': 'str'}, + 'ssh_proxy_folder': {'type': 'str'} + } + ) + + validation_result, new_module_args = self.validate_argument_spec( + argument_spec=merged_arg_spec, + ) + + auth_source = os.environ.get('ANSIBLE_AZURE_AUTH_SOURCE', None) or new_module_args.get('auth_source') + auth_options = dict( + auth_source=auth_source, + profile=new_module_args.get('profile'), + subscription_id=new_module_args.get('subscription_id'), + client_id=new_module_args.get('client_id'), + secret=new_module_args.get('secret'), + tenant=new_module_args.get('tenant'), + ad_user=new_module_args.get('ad_user'), + password=new_module_args.get('password'), + cloud_environment=new_module_args.get('cloud_environment'), + cert_validation_mode=new_module_args.get('cert_validation_mode'), + api_profile=new_module_args.get('api_profile'), + track1_cred=True, + adfs_authority_url=new_module_args.get('adfs_authority_url') + ) + + inventory_hostname = new_module_args.get('inventory_hostname') + ansible_host = new_module_args.get('ansible_host') + resource_group = new_module_args.get('resource_group') + resource_type = new_module_args.get('resource_type') + private_key_file = new_module_args.get('private_key_file') + local_user = new_module_args.get('local_user') + port = new_module_args.get('port') + ssh_config_file = new_module_args.get('ssh_config_file') + ssh_relay_file = new_module_args.get('ssh_relay_file') + ssh_proxy_folder = new_module_args.get('ssh_proxy_folder') + 
result.update(dict( + changed=False, + rc=0, + stderr='', + stdout='' + )) + + ######################################################################## + # Begin the hard work! + + azure_auth = AzureRMAuth(**auth_options) + rest_client = GenericRestClient(azure_auth.azure_credential_track2, + azure_auth.subscription_id, + azure_auth._cloud_environment.endpoints.resource_manager, + credential_scopes=[azure_auth._cloud_environment.endpoints.resource_manager + ".default"]) + + config_session = ssh_info.ConfigSession(ssh_config_file, + ssh_relay_file, + resource_group, + inventory_hostname, + ansible_host, + private_key_file, + local_user, + port, + resource_type, + ssh_proxy_folder) + + try: + cert_lifetime = None # If set to None we default to the max which is 1 hour + config_session.proxy_path = connectivity_utils.install_client_side_proxy(config_session.ssh_proxy_folder) + (config_session.relay_info, + config_session.new_service_config) = connectivity_utils.get_relay_information(rest_client, + azure_auth.subscription_id, + config_session.resource_group_name, + config_session.hostname, + config_session.resource_type, + cert_lifetime, + config_session.port) + except Exception as e: + raise AnsibleActionFail("Failed to get relay information.", orig_exc=e) + + config_text = config_session.get_config_text() + ssh_config_path = config_session.ssh_config_file + + ssh_config_dir = os.path.dirname(ssh_config_path) + if not os.path.isdir(ssh_config_dir): + os.makedirs(ssh_config_dir) + + file_utils.write_to_file(ssh_config_path, + 'w', + '\n'.join(config_text), + f"Couldn't write ssh config to file {ssh_config_path}.", + 'utf-8') + + result['stdout'] = "SSH proxy configured for %s in %s" % (inventory_hostname, config_session.ssh_config_file) + return result diff --git a/plugins/doc_fragments/azure_rm.py b/plugins/doc_fragments/azure_rm.py index ff2fbe96b..40c6236e7 100644 --- a/plugins/doc_fragments/azure_rm.py +++ b/plugins/doc_fragments/azure_rm.py @@ -26,6 +26,10 @@ class 
ModuleDocFragment(object): description: A list of resource group names to search for Azure StackHCI virtual machines. '\*' will include all resource groups in the subscription. default: [] + include_arc_resource_groups: + description: A list of resource group names to search for Azure ARC machines. '\*' will include all + resource groups in the subscription. + default: [] include_vmss_resource_groups: description: A list of resource group names to search for virtual machine scale sets (VMSSs). '\*' will include all resource groups in the subscription. diff --git a/plugins/inventory/azure_rm.py b/plugins/inventory/azure_rm.py index 9741c519a..8640e59d3 100644 --- a/plugins/inventory/azure_rm.py +++ b/plugins/inventory/azure_rm.py @@ -68,6 +68,10 @@ include_hcivm_resource_groups: - myrg1 +# fetches ARC hosts in specific resource groups (defaults to no ARC fetch) +include_arc_resource_groups: + - myrg1 + # places a host in the named group if the associated condition evaluates to true conditional_groups: # since this will be true for every host, every host sourced from this inventory plugin config will be in the @@ -298,6 +302,15 @@ def _enqueue_vm_list(self, rg='*'): url = url.format(subscriptionId=self._clientconfig.subscription_id, rg=rg) self._enqueue_get(url=url, api_version=self._compute_api_version, handler=self._on_vm_page_response) + def _enqueue_arc_list(self, rg='*'): + if not rg or rg == '*': + url = '/subscriptions/{subscriptionId}/providers/Microsoft.HybridCompute/machines' + else: + url = '/subscriptions/{subscriptionId}/resourceGroups/{rg}/providers/Microsoft.HybridCompute/machines' + + url = url.format(subscriptionId=self._clientconfig.subscription_id, rg=rg) + self._enqueue_get(url=url, api_version=self._hybridcompute_api_version, handler=self._on_arc_page_response) + def _enqueue_arcvm_list(self, rg='*'): if not rg or rg == '*': url = '/subscriptions/{subscriptionId}/providers/Microsoft.HybridCompute/machines' @@ -324,6 +337,9 @@ def 
_get_hosts(self): for vm_rg in self.get_option('include_vm_resource_groups'): self._enqueue_vm_list(vm_rg) + for arc_rg in self.get_option('include_arc_resource_groups'): + self._enqueue_arc_list(arc_rg) + for vm_rg in self.get_option('include_hcivm_resource_groups'): self._enqueue_arcvm_list(vm_rg) @@ -434,6 +450,15 @@ def _on_vm_page_response(self, response, vmss=None, arcvm=None): # FUTURE: add direct VM filtering by tag here (performance optimization)? self._hosts.append(AzureHost(h, self, vmss=vmss, arcvm=arcvm, legacy_name=self._legacy_hostnames)) + def _on_arc_page_response(self, response): + next_link = response.get('nextLink') + + if next_link: + self._enqueue_get(url=next_link, api_version=self._hybridcompute_api_version, handler=self._on_arc_page_response) + + for arcvm in response['value']: + self._hosts.append(ArcHost(arcvm, self, legacy_name=self._legacy_hostnames)) + def _on_arcvm_page_response(self, response): next_link = response.get('nextLink') @@ -444,6 +469,7 @@ def _on_arcvm_page_response(self, response): url = '{0}/providers/Microsoft.AzureStackHCI/virtualMachineInstances'.format(arcvm['id']) # Stack HCI instances look close enough to regular VMs that we can share the handler impl... 
self._enqueue_get(url=url, api_version=self._stackhci_api_version, handler=self._on_vm_page_response, handler_args=dict(arcvm=arcvm)) + self._hosts.append(ArcHost(arcvm, self, legacy_name=self._legacy_hostnames)) def _on_vmss_page_response(self, response): next_link = response.get('nextLink') @@ -584,6 +610,69 @@ def _legacy_script_compatible_group_sanitization(name): # VMSS VMs (all SS, N specific SS, N resource groups?): SS -> VM -> InstanceView, N NICs, N PublicIPAddress) +class ArcHost(object): + def __init__(self, arc_model, inventory_client, legacy_name=False): + self._inventory_client = inventory_client + self._arc_model = arc_model + self._instanceview = self._arc_model + self._status = self._arc_model['properties'].get('status', {}).lower() # 'Connected' + self._powerstate = self._status.replace('connected', 'running') + + self._hostvars = {} + + arc_name = self._arc_model['name'] + + if legacy_name: + self.default_inventory_hostname = arc_name + else: + # Azure often doesn't provide a globally-unique filename, so use resource name + a chunk of ID hash + self.default_inventory_hostname = '{0}_{1}'.format(arc_name, hashlib.sha1(to_bytes(arc_model['id'])).hexdigest()[0:4]) + + @property + def hostvars(self): + if self._hostvars != {}: + return self._hostvars + + properties = self._arc_model.get('properties', {}) + new_hostvars = dict( + network_interface=[], + mac_address=[], + ansible_all_ipv4_addresses=[], + ansible_all_ipv6_addresses=[], + public_ipv4_address=[], + private_ipv4_addresses=[], + public_dns_hostnames=[], + ansible_dns=[], + id=self._arc_model['id'], + location=self._arc_model['location'], + name=self._arc_model['name'], + default_inventory_hostname=self.default_inventory_hostname, + powerstate=self._powerstate, + status=self._status, + provisioning_state=properties.get('provisioningState', 'unknown').lower(), + os_profile=dict( + sku=properties.get('osSku', 'unknown'), + system=properties.get('osType', 'unknown'), + 
version=properties.get('osVersion', 'unknown'), + ), + tags=self._arc_model.get('tags', {}), + resource_type=self._arc_model.get('type', "unknown"), + resource_group=parse_resource_id(self._arc_model['id']).get('resource_group').lower(), + ) + + for nic in properties.get('networkProfile', {}).get('networkInterfaces', []): + new_hostvars['mac_address'].append(nic.get('macAddress')) + new_hostvars['network_interface'].append(nic.get('name')) + for ipaddr in nic.get('ipAddresses', []): + ipAddressVersion = ipaddr.get('ipAddressVersion') + if ipAddressVersion == 'IPv4': + new_hostvars['ansible_all_ipv4_addresses'].append(ipaddr.get('address')) + if ipAddressVersion == 'IPv6': + new_hostvars['ansible_all_ipv6_addresses'].append(ipaddr.get('address')) + self._hostvars = new_hostvars + return self._hostvars + + class AzureHost(object): _powerstate_regex = re.compile('^PowerState/(?P.+)$') diff --git a/plugins/modules/azure_rm_arcssh.py b/plugins/modules/azure_rm_arcssh.py new file mode 100644 index 000000000..587c31589 --- /dev/null +++ b/plugins/modules/azure_rm_arcssh.py @@ -0,0 +1,117 @@ +# Copyright (c) 2024 Bill Peck +# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import annotations + +DOCUMENTATION = ''' +--- +module: azure_rm_arcssh +version_added: "3.0.0" +short_description: Configure SSH proxy for ARC hosts +description: + - Configure SSH proxy for ARC hosts. Use this in combination with the + azure_rm inventory plugin with include_arc_resource_groups to dynamically + generate the list of hosts. + +options: + resource_group: + description: + - The name of the Resource Group to which the server belongs. + required: True + type: str + resource_type: + description: + - The name of the Resource Type to which the server belongs. + required: True + type: str + inventory_hostname: + description: + - The inventory configured hostname. 
- The private key that matches the public key that you have already added to the authorized_keys file of local_user.
+# - prefix: "type" +# key: resource_type +# trailing_separator: false + +- name: Configure ARC SSH Proxy + hosts: localhost + connection: local + tasks: + - name: Setup Proxy + azure.azcollection.azure_rm_arcssh: + inventory_hostname: "{{ item }}" + ansible_host: "{{ hostvars[item].ansible_host }}" + local_user: admin + resource_group: "{{ hostvars[item].resource_group }}" + resource_type: "{{ hostvars[item].resource_type }}" + private_key_file: "~/.ssh/id_rsa" + ssh_config_file: "/tmp/{{ hostvars[item].resource_group }}-{{ item }}/ssh_config" + ssh_relay_file: "/tmp/{{ hostvars[item].resource_group }}-{{ item }}/relay_info" + ssh_proxy_folder: "~/.clientsshproxy" + loop: "{{ groups['type_Microsoft_HybridCompute_machines'] }}" + +- name: Ping ARC Hosts + hosts: type_Microsoft_HybridCompute_machines + tasks: + - name: Ping + ansible.builtin.ping: +''' diff --git a/plugins/plugin_utils/connectivity_utils.py b/plugins/plugin_utils/connectivity_utils.py new file mode 100644 index 000000000..997255b25 --- /dev/null +++ b/plugins/plugin_utils/connectivity_utils.py @@ -0,0 +1,373 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- + +import time +import os +import json +import base64 +import tarfile +from glob import glob +import traceback + +HAS_ORAS = False +HAS_ORAS_EXC = None + +try: + import oras.client + HAS_ORAS = True +except ImportError: + oras = None + HAS_ORAS_EXC = traceback.format_exc() + +try: + from azure.core.exceptions import ResourceNotFoundError, HttpResponseError + from azure.mgmt.core.tools import resource_id, parse_resource_id +except ImportError: + pass + +from ansible_collections.azure.azcollection.plugins.plugin_utils import file_utils +from ansible_collections.azure.azcollection.plugins.plugin_utils import constants as consts + +from ansible.errors import AnsibleParserError, AnsibleError + + +# Get the Access Details to connect to Arc Connectivity platform from the HybridConnectivity RP +def get_relay_information(rest_client, subscription_id, resource_group, hostname, resource_type, + certificate_validity_in_seconds, port): + if not certificate_validity_in_seconds or \ + certificate_validity_in_seconds > consts.RELAY_INFO_MAXIMUM_DURATION_IN_SECONDS: + certificate_validity_in_seconds = consts.RELAY_INFO_MAXIMUM_DURATION_IN_SECONDS + + namespace = resource_type.split('/', 1)[0] + arc_type = resource_type.split('/', 1)[1] + resource_uri = resource_id(subscription=subscription_id, resource_group=resource_group, + namespace=namespace, type=arc_type, name=hostname) + + cred = None + new_service_config = False + try: + cred = _list_credentials(rest_client, resource_uri, certificate_validity_in_seconds) + except ResourceNotFoundError: + _create_default_endpoint(rest_client, resource_uri) + except HttpResponseError as e: + if e.reason != "Precondition Failed": + raise AnsibleError(f"Unable to retrieve relay information. Failed with error: {str(e)}") + except Exception as e: + raise AnsibleError(f"Unable to retrieve relay information. 
Failed with error: {str(e)}") + + if not cred: + _create_service_configuration(rest_client, resource_uri, port) + new_service_config = True + try: + cred = _list_credentials(rest_client, resource_uri, certificate_validity_in_seconds) + except Exception as e: + raise AnsibleError(f"Unable to get relay information. Failed with error: {str(e)}") + _handle_relay_connection_delay(rest_client, "Setting up service configuration") + else: + if not _check_service_configuration(rest_client, resource_uri, port): + _create_service_configuration(rest_client, resource_uri, port) + new_service_config = True + try: + cred = _list_credentials(rest_client, resource_uri, certificate_validity_in_seconds) + except Exception as e: + raise AnsibleError(f"Unable to get relay information. Failed with error: {str(e)}") + _handle_relay_connection_delay(rest_client, "Setting up service configuration") + return (cred, new_service_config) + + +def _check_service_configuration(rest_client, resource_uri, port): + url = f"/{resource_uri}/providers/Microsoft.HybridConnectivity/endpoints/default/serviceConfigurations/SSH" + + serviceConfig = None + # pylint: disable=broad-except + try: + serviceConfig = resource(rest_client, url, "GET") + except Exception: + # If for some reason the request for Service Configuration fails, + # we will still attempt to get relay information and connect. If the service configuration + # is not setup correctly, the connection will fail. + # The more likely scenario is that the request failed with a "Authorization Error", + # in case the user isn't an owner/contributor. 
+ return True + serviceConfigPort = serviceConfig and serviceConfig.get('properties', {}).get('port') + if port: + return int(serviceConfigPort) == int(port) + + return True + + +def _create_default_endpoint(rest_client, resource_uri): + url = f"/{resource_uri}/providers/Microsoft.HybridConnectivity/endpoints/default" + body = {'properties': {'type': 'default'}} + + hostname = parse_resource_id(resource_uri)["name"] + resource_group = parse_resource_id(resource_uri)["resource_group"] + try: + endpoint = resource(rest_client, url, "PUT", body=body) + except HttpResponseError as e: + if e.reason == "Forbidden": + raise AnsibleError(f"Client is not authorized to create a default connectivity " + f"endpoint for \'{hostname}\' in Resource Group \'{resource_group}\'. " + f"This is a one-time operation that must be performed by " + f"an account with Owner or Contributor role to allow " + f"connections to the target resource.") + raise AnsibleError(f"Failed to create default endpoint for the target Arc Server. " + f"\nError: {str(e)}") + except Exception as e: + raise AnsibleError(f"Failed to create default endpoint for the target Arc Server. " + f"\nError: {str(e)}") + + return endpoint + + +def _create_service_configuration(rest_client, resource_uri, port): + if not port: + port = '22' + + url = f"/{resource_uri}/providers/Microsoft.HybridConnectivity/endpoints/default/serviceConfigurations/SSH" + body = {'properties': {'port': int(port), 'serviceName': 'SSH' }} + + hostname = parse_resource_id(resource_uri)["name"] + resource_group = parse_resource_id(resource_uri)["resource_group"] + + try: + serviceConfig = resource(rest_client, url, "PUT", body=body) + except HttpResponseError as e: + if e.reason == "Forbidden": + raise AnsibleError(f"Client is not authorized to create or update the Service " + f"Configuration endpoint for \'{hostname}\' in the Resource " + f"Group \'{resource_group}\'. 
This is an operation that " + f"must be performed by an account with Owner or Contributor " + f"role to allow SSH connections to the specified port {port}.") + raise AnsibleError(f"Failed to create service configuration to allow SSH " + f"connections to port {port} on the endpoint for {hostname} " + f"in the Resource Group {resource_group}\nError: {str(e)}") + except Exception as e: + raise AnsibleError(f"Failed to create service configuration to allow SSH connections " + f"to port {port} on the endpoint for {hostname} in the Resource " + f"Group {resource_group}\nError: {str(e)}") + return serviceConfig + + +def _list_credentials(rest_client, resource_uri, certificate_validity_in_seconds): + url = f"/{resource_uri}/providers/Microsoft.HybridConnectivity/endpoints/default/listCredentials" + query_parameters = {'expiresin': int(certificate_validity_in_seconds)} + body = {'serviceName': 'SSH'} + + response = resource(rest_client, url, "POST", body=body, query_parameters=query_parameters) + return response and response.get('relay') + + +def format_relay_info_string(relay_info): + relay_info_string = json.dumps( + { + "relay": { + "namespaceName": relay_info['namespaceName'], + "namespaceNameSuffix": relay_info['namespaceNameSuffix'], + "hybridConnectionName": relay_info['hybridConnectionName'], + "accessKey": relay_info['accessKey'], + "expiresOn": relay_info['expiresOn'], + "serviceConfigurationToken": relay_info['serviceConfigurationToken'] + } + }) + result_bytes = relay_info_string.encode("ascii") + enc = base64.b64encode(result_bytes) + base64_result_string = enc.decode("ascii") + return base64_result_string + + +def _handle_relay_connection_delay(rest_client, message): + # relay has retry delay after relay connection is lost + # must sleep for at least as long as the delay + # otherwise the ssh connection will fail + for x in range(0, consts.SERVICE_CONNECTION_DELAY_IN_SECONDS + 1): + time.sleep(1) + + +# Downloads client side proxy to connect to Arc 
Connectivity Platform +def install_client_side_proxy(arc_proxy_folder): + + client_operating_system = _get_client_operating_system() + client_architecture = _get_client_architeture() + install_dir = _get_proxy_install_dir(arc_proxy_folder) + proxy_name = _get_proxy_filename(client_operating_system, client_architecture) + install_location = os.path.join(install_dir, proxy_name) + + # Only download new proxy if it doesn't exist already + if not os.path.isfile(install_location): + if not os.path.isdir(install_dir): + file_utils.create_directory(install_dir, f"Failed to create client proxy directory '{install_dir}'.") + # if directory exists, delete any older versions of the proxy + else: + older_version_location = _get_older_version_proxy_path( + install_dir, + client_operating_system, + client_architecture) + older_version_files = glob(older_version_location) + for f in older_version_files: + file_utils.delete_file(f, f"failed to delete older version file {f}", warning=True) + + _download_proxy_from_MCR(install_dir, proxy_name, client_operating_system, client_architecture) + _check_proxy_installation(install_dir, proxy_name) + + return install_location + + +def _download_proxy_from_MCR(dest_dir, proxy_name, operating_system, architecture): + mar_target = f"{consts.CLIENT_PROXY_MCR_TARGET}/{operating_system.lower()}/{architecture}/ssh-proxy" + + client = oras.client.OrasClient() + + try: + response = client.pull(target=f"{mar_target}:{consts.CLIENT_PROXY_VERSION}", outdir=dest_dir) + except Exception as e: + raise AnsibleError( + f"Failed to download Arc Connectivity proxy with error {str(e)}. Please try again.") + + proxy_package_path = _get_proxy_package_path_from_oras_response(response) + _extract_proxy_tar_files(proxy_package_path, dest_dir, proxy_name) + file_utils.delete_file(proxy_package_path, f"Failed to delete {proxy_package_path}. 
Please delete manually.", True) + + +def _get_proxy_package_path_from_oras_response(pull_response): + if not isinstance(pull_response, list): + raise AnsibleError( + "Attempt to download Arc Connectivity Proxy returned unnexpected result. Please try again.") + + if len(pull_response) != 1: + for r in pull_response: + file_utils.delete_file(r, f"Failed to delete {r}. Please delete it manually.", True) + raise AnsibleError( + "Attempt to download Arc Connectivity Proxy returned unnexpected result. Please try again.") + + proxy_package_path = pull_response[0] + + if not os.path.isfile(proxy_package_path): + raise AnsibleError("Unable to download Arc Connectivity Proxy. Please try again.") + + return proxy_package_path + + +def _extract_proxy_tar_files(proxy_package_path, install_dir, proxy_name): + with tarfile.open(proxy_package_path, 'r:gz') as tar: + members = [] + for member in tar.getmembers(): + if member.isfile(): + filenames = member.name.split('/') + + if len(filenames) != 2: + tar.close() + file_utils.delete_file( + proxy_package_path, + f"Failed to delete {proxy_package_path}. Please delete it manually.", + True) + raise AnsibleError( + "Attempt to download Arc Connectivity Proxy returned unnexpected result. Please try again.") + + member.name = filenames[1] + + if member.name.startswith('sshproxy'): + member.name = proxy_name + elif member.name.lower() not in ['license.txt', 'thirdpartynotice.txt']: + tar.close() + file_utils.delete_file( + proxy_package_path, + f"Failed to delete {proxy_package_path}. Please delete it manually.", + True) + raise AnsibleError( + "Attempt to download Arc Connectivity Proxy returned unnexpected result. 
Please try again.") + + members.append(member) + + tar.extractall(members=members, path=install_dir) + + +def _check_proxy_installation(install_dir, proxy_name): + proxy_filepath = os.path.join(install_dir, proxy_name) + if not os.path.isfile(proxy_filepath): + raise AnsibleError( + "Failed to install required SSH Arc Connectivity Proxy. " + f"Couldn't find expected file {proxy_filepath}. Please try again.") + + +def _get_proxy_filename(operating_system, architecture): + if operating_system.lower() == 'darwin' and architecture == '386': + raise AnsibleError("Unsupported Darwin OS with 386 architecture.") + proxy_filename = \ + f"sshProxy_{operating_system.lower()}_{architecture}_{consts.CLIENT_PROXY_VERSION.replace('.', '_')}" + if operating_system.lower() == 'windows': + proxy_filename += '.exe' + return proxy_filename + + +def _get_older_version_proxy_path(install_dir, operating_system, architecture): + proxy_name = f"sshProxy_{operating_system.lower()}_{architecture}_*" + return os.path.join(install_dir, proxy_name) + + +def _get_proxy_install_dir(arc_proxy_folder): + if not arc_proxy_folder: + return os.path.expanduser(os.path.join('~', ".clientsshproxy")) + return arc_proxy_folder + + +def _get_client_architeture(): + import platform + machine = platform.machine() + architecture = None + + if "arm64" in machine.lower() or "aarch64" in machine.lower(): + architecture = 'arm64' + elif machine.endswith('64'): + architecture = 'amd64' + elif machine.endswith('86'): + architecture = '386' + elif machine == '': + raise AnsibleError("Couldn't identify the platform architecture.") + else: + raise AnsibleError(f"Unsuported architecture: {machine} is not currently supported") + + return architecture + + +def _get_client_operating_system(): + import platform + operating_system = platform.system() + + if operating_system.lower() not in ('linux', 'darwin', 'windows'): + raise AnsibleError(f"Unsuported OS: {operating_system} platform is not currently supported") + return 
operating_system + +def resource(client, + url, + method, + body=None, + query_parameters={}, + header_parameters={}, + status_code = [200, 201, 202]): + + query_parameters['api-version'] = '2023-03-15' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + + response = client.query(url, + method, + query_parameters, + header_parameters, + body, + status_code, + 600, + 30) + + if hasattr(response, 'body'): + try: + response = json.loads(response.body()) + except Exception: + response = response.body() + elif hasattr(response, 'context'): + response = response.context['deserialized_data'] + else: + response = None + + return response diff --git a/plugins/plugin_utils/constants.py b/plugins/plugin_utils/constants.py new file mode 100644 index 000000000..cb88571b8 --- /dev/null +++ b/plugins/plugin_utils/constants.py @@ -0,0 +1,48 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- +AGENT_MINIMUM_VERSION_MAJOR = 1 +AGENT_MINIMUM_VERSION_MINOR = 31 +CLIENT_PROXY_VERSION = "1.3.026973" +CLIENT_PROXY_MCR_TARGET = "mcr.microsoft.com/azureconnectivity/proxy" +CLEANUP_TOTAL_TIME_LIMIT_IN_SECONDS = 120 +CLEANUP_TIME_INTERVAL_IN_SECONDS = 10 +CLEANUP_AWAIT_TERMINATION_IN_SECONDS = 30 +RELAY_INFO_MAXIMUM_DURATION_IN_SECONDS = 3600 +RETRY_DELAY_IN_SECONDS = 10 +SERVICE_CONNECTION_DELAY_IN_SECONDS = 15 +WINDOWS_INVALID_FOLDERNAME_CHARS = "\\/*:<>?\"|" +RDP_TERMINATE_SSH_WAIT_TIME_IN_SECONDS = 30 + +ARC_RESOURCE_TYPE_PLACEHOLDER = "arc_resource_type_placeholder" + +SUPPORTED_RESOURCE_TYPES = ["microsoft.hybridcompute/machines", + "microsoft.compute/virtualmachines", + "microsoft.connectedvmwarevsphere/virtualmachines", + "microsoft.scvmm/virtualmachines", + "microsoft.azurestackhci/virtualmachines"] + +# Old version incorrectly used resource providers instead of resource type. +# Will continue to support to avoid breaking backwards compatibility. 
+LEGACY_SUPPORTED_RESOURCE_TYPES = ["microsoft.hybridcompute", + "microsoft.compute", + "microsoft.connectedvmwarevsphere", + "microsoft.scvmm", + "microsoft.azurestackhci"] + +RESOURCE_PROVIDER_TO_RESOURCE_TYPE = { + "microsoft.hybridcompute": "Microsoft.HybridCompute/machines", + "microsoft.compute": "Microsoft.Compute/virtualMachines", + "microsoft.connectedvmwarevsphere": "Microsoft.ConnectedVMwarevSphere/virtualMachines", + "microsoft.azurestackhci": "Microsoft.AzureStackHCI/virtualMachines", + "microsoft.scvmm": "Microsoft.ScVmm/virtualMachines" +} + +RESOURCE_TYPE_LOWER_CASE_TO_CORRECT_CASE = { + "microsoft.hybridcompute/machines": "Microsoft.HybridCompute/machines", + "microsoft.compute/virtualmachines": "Microsoft.Compute/virtualMachines", + "microsoft.connectedvmwarevsphere/virtualmachines": "Microsoft.ConnectedVMwarevSphere/virtualMachines", + "microsoft.scvmm/virtualmachines": "Microsoft.ScVmm/virtualMachines", + "microsoft.azurestackhci/virtualmachines": "Microsoft.AzureStackHCI/virtualMachines" +} diff --git a/plugins/plugin_utils/file_utils.py b/plugins/plugin_utils/file_utils.py new file mode 100644 index 000000000..9e41bd639 --- /dev/null +++ b/plugins/plugin_utils/file_utils.py @@ -0,0 +1,88 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------------------------- + +import errno +import os + +from ansible.utils.display import Display +from ansible_collections.azure.azcollection.plugins.plugin_utils import constants as consts +from ansible.errors import AnsibleParserError, AnsibleError + + +display = Display() + + +def make_dirs_for_file(file_path): + if not os.path.exists(file_path): + mkdir_p(os.path.dirname(file_path)) + + +def mkdir_p(path): + try: + os.makedirs(path) + except OSError as exc: # Python <= 2.5 + if exc.errno == errno.EEXIST and os.path.isdir(path): + pass + else: + raise + + +def delete_file(file_path, message, warning=False): + # pylint: disable=broad-except + if os.path.isfile(file_path): + try: + os.remove(file_path) + except Exception as e: + if warning: + display.warning(message) + else: + raise AnsibleError(message + "Error: " + str(e)) from e + + +def delete_folder(dir_path, message, warning=False): + # pylint: disable=broad-except + if os.path.isdir(dir_path): + try: + os.rmdir(dir_path) + except Exception as e: + if warning: + display.warning(message) + else: + raise AnsibleError(message + "Error: " + str(e)) from e + + +def create_directory(file_path, error_message): + try: + os.makedirs(file_path) + except Exception as e: + raise AnsibleError(error_message + "Error: " + str(e)) from e + + +def write_to_file(file_path, mode, content, error_message, encoding=None): + # pylint: disable=unspecified-encoding + try: + if encoding: + with open(file_path, mode, encoding=encoding) as f: + f.write(content) + else: + with open(file_path, mode) as f: + f.write(content) + except Exception as e: + raise AnsibleError(error_message + "Error: " + str(e)) from e + + +def get_line_that_contains(substring, lines): + for line in lines: + if substring in line: + return line + return None + + +def remove_invalid_characters_foldername(folder_name): + new_foldername = "" + for c in folder_name: + if c not in
consts.WINDOWS_INVALID_FOLDERNAME_CHARS: + new_foldername += c + return new_foldername diff --git a/plugins/plugin_utils/ssh_info.py b/plugins/plugin_utils/ssh_info.py new file mode 100644 index 000000000..178a3e439 --- /dev/null +++ b/plugins/plugin_utils/ssh_info.py @@ -0,0 +1,63 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + + +import os +from ansible_collections.azure.azcollection.plugins.plugin_utils import file_utils +from ansible_collections.azure.azcollection.plugins.plugin_utils import connectivity_utils + + +class ConfigSession(): + # pylint: disable=too-many-instance-attributes + def __init__(self, ssh_config_file, ssh_relay_file, resource_group_name, hostname, ansible_host, + private_key_file, local_user, port, resource_type, ssh_proxy_folder): + self.resource_group_name = resource_group_name + self.hostname = hostname + self.ansible_host = ansible_host + self.local_user = local_user + self.port = port + self.resource_type = resource_type + self.proxy_path = None + self.relay_info = None + self.relay_info_path = None + self.ssh_config_file = os.path.abspath(os.path.expanduser(ssh_config_file)) + self.ssh_relay_file = os.path.abspath(os.path.expanduser(ssh_relay_file)) + self.private_key_file = os.path.abspath(os.path.expanduser(private_key_file)) if private_key_file else None + self.ssh_proxy_folder = os.path.abspath(os.path.expanduser(ssh_proxy_folder)) if ssh_proxy_folder else None + + + def get_config_text(self): + lines = [""] + self.relay_info_path = self._create_relay_info_file() + lines = lines + self._get_arc_entry() + return lines + + def _get_arc_entry(self): + lines = [] + lines.append("Host " + self.ansible_host) + lines.append("\tHostName "
+ self.hostname) + lines.append("\tUser " + self.local_user) + if self.private_key_file: + lines.append("\tIdentityFile \"" + self.private_key_file + "\"") + if self.port: + lines.append("\tProxyCommand \"" + self.proxy_path + "\" " + "-r \"" + self.relay_info_path + "\" " + + "-p " + str(self.port)) + else: + lines.append("\tProxyCommand \"" + self.proxy_path + "\" " + "-r \"" + self.relay_info_path + "\"") + return lines + + def _create_relay_info_file(self): + relay_info_path = self.ssh_relay_file + relay_info_dir = os.path.dirname(relay_info_path) + if not os.path.isdir(relay_info_dir): + os.makedirs(relay_info_dir) + + # Overwrite relay_info if it already exists in that folder. + file_utils.delete_file(relay_info_path, f"{relay_info_path} already exists, and couldn't be overwritten.") + file_utils.write_to_file(relay_info_path, 'w', connectivity_utils.format_relay_info_string(self.relay_info), + f"Couldn't write relay information to file {relay_info_path}.", 'utf-8') + os.chmod(relay_info_path, 0o644) + + return relay_info_path diff --git a/requirements.txt b/requirements.txt index 421e078b4..6212f5f5c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -49,3 +49,4 @@ azure-mgmt-recoveryservices==3.0.0 azure-mgmt-recoveryservicesbackup==9.1.0 azure-mgmt-notificationhubs==8.1.0b1 azure-mgmt-eventhub==11.1.0 +oras