From 96389f1b2130c6e1278ef7022643b109ac2bb286 Mon Sep 17 00:00:00 2001 From: Mark Beacom Date: Tue, 24 Mar 2020 17:22:05 -0400 Subject: [PATCH] Adjust linting (#110) * Adjust linting and black config * Adjust GH action for linting --- .github/workflows/lint-python.yml | 4 +- Makefile | 2 +- cloudendure/api.py | 38 +-- cloudendure/cloudendure.py | 364 +++++++--------------------- cloudendure/config.py | 19 +- cloudendure/events.py | 4 +- cloudendure/models.py | 17 +- cloudendure/templates.py | 4 +- lambda/copy_ami.py | 23 +- lambda/handler.py | 11 +- pyproject.toml | 19 +- step/lambdas/copy_image.py | 8 +- step/lambdas/create_image.py | 3 +- step/lambdas/find_instance.py | 4 +- step/lambdas/get_copy_status.py | 8 +- step/lambdas/get_image_status.py | 4 +- step/lambdas/get_instance_status.py | 4 +- step/lambdas/image_cleanup.py | 4 +- step/lambdas/migrationstate.py | 6 +- step/lambdas/share_image.py | 4 +- step/lambdas/split_image.py | 11 +- 21 files changed, 156 insertions(+), 405 deletions(-) diff --git a/.github/workflows/lint-python.yml b/.github/workflows/lint-python.yml index f1619cef4..15bc4891a 100644 --- a/.github/workflows/lint-python.yml +++ b/.github/workflows/lint-python.yml @@ -11,7 +11,7 @@ jobs: strategy: max-parallel: 4 matrix: - python-version: ['3.7'] + python-version: ['3.8'] steps: - uses: actions/checkout@v1 - name: Set up Python ${{ matrix.python-version }} @@ -34,4 +34,4 @@ jobs: - name: Lint with Black run: | black --version - black --target-version py37 --check . + black --check . diff --git a/Makefile b/Makefile index 178e20e3d..ebc5bf8b5 100644 --- a/Makefile +++ b/Makefile @@ -55,7 +55,7 @@ push: ## Push the Docker image to the Docker Hub repository. docker: build build_py38 ## Build and publish Docker images. lint: isort ## Lint the CloudEndure project with Black. - @poetry run black --target-version py37 . + @poetry run black . update_prereqs: ## Update the local development pre-requisite packages. @pip install --upgrade wheel setuptools pip diff --git a/cloudendure/api.py b/cloudendure/api.py index 71bc36886..3342b43aa 100644 --- a/cloudendure/api.py +++ b/cloudendure/api.py @@ -117,24 +117,16 @@ def login(self, username: str = "", password: str = "", token: str = "") -> bool return False # Attempt to login to the CloudEndure API via a POST request. - response: requests.Response = self.api_call( - "login", "post", data=json.dumps(_auth) - ) + response: requests.Response = self.api_call("login", "post", data=json.dumps(_auth)) # Check whether or not the request was successful. if response.status_code not in [200, 307]: if response.status_code == 401: - print( - "\nBad CloudEndure Credentials! Check your username/password and try again!\n" - ) + print("\nBad CloudEndure Credentials! Check your username/password and try again!\n") elif response.status_code == 402: - print( - "\nNo CloudEndure License! Please configure your account and try again!\n" - ) + print("\nNo CloudEndure License! Please configure your account and try again!\n") elif response.status_code == 429: - print( - "\nCloudEndure authentication failure limit reached! Please try again later!\n" - ) + print("\nCloudEndure authentication failure limit reached! Please try again later!\n") return False # Grab the XSRF token received from the response, as stored in cookies. 
@@ -155,11 +147,7 @@ def login(self, username: str = "", password: str = "", token: str = "") -> bool return True @staticmethod - def get_endpoint( - path: str, - api_version: str = "latest", - host: str = "https://console.cloudendure.com", - ) -> str: + def get_endpoint(path: str, api_version: str = "latest", host: str = "https://console.cloudendure.com",) -> str: """Build the endpoint path. Returns: @@ -168,9 +156,7 @@ def get_endpoint( """ return f"{host}/api/{api_version}/{path}" - def api_call( - self, path: str, method: str = "get", data: Dict[str, Any] = None - ) -> Response: + def api_call(self, path: str, method: str = "get", data: Dict[str, Any] = None) -> Response: """Handle CloudEndure API calls based on the defined parameters. Args: @@ -194,9 +180,7 @@ def api_call( return Response() if method not in ["get", "delete"] and not data: - print( - "Paramater mismatch! If calling anything other than get or delete provide data!" - ) + print("Paramater mismatch! If calling anything other than get or delete provide data!") return Response() # Attempt to call the CloudEndure API. @@ -238,17 +222,13 @@ def get_projects(self, current_project: str = "") -> List[Any]: self.projects: List[Any] = projects if current_project: - return list( - filter(lambda project: project["name"] == current_project, projects) - ) + return list(filter(lambda project: project["name"] == current_project, projects)) return projects @classmethod def docs(self) -> str: """Open the CloudEndure API documentation page.""" - docs_url: str = os.environ.get( - "CLOUDENDURE_API_DOCS", "https://console.cloudendure.com/api_doc/apis.html" - ) + docs_url: str = os.environ.get("CLOUDENDURE_API_DOCS", "https://console.cloudendure.com/api_doc/apis.html") open_new_tab(docs_url) return docs_url diff --git a/cloudendure/cloudendure.py b/cloudendure/cloudendure.py index dc7890504..0f09e6daa 100644 --- a/cloudendure/cloudendure.py +++ b/cloudendure/cloudendure.py @@ -33,21 +33,14 @@ class CloudEndure: """Define the CloudEndure general object.""" def __init__( - self, - project_name: str = "", - dry_run: bool = False, - username: str = "", - password: str = "", - token: str = "", + self, project_name: str = "", dry_run: bool = False, username: str = "", password: str = "", token: str = "", ) -> None: """Initialize the CloudEndure object. This entrypoint is primarily for use with the CLI. 
""" - self.config: CloudEndureConfig = CloudEndureConfig( - username=username, password=password, token=token - ) + self.config: CloudEndureConfig = CloudEndureConfig(username=username, password=password, token=token) self.api: CloudEndureAPI = CloudEndureAPI(self.config) self.is_authenticated = self.api.login() @@ -64,32 +57,18 @@ def __init__( self.dry_run = dry_run self.event_handler: EventHandler = EventHandler() - self.destination_account: str = self.config.active_config.get( - "destination_account", "" - ) + self.destination_account: str = self.config.active_config.get("destination_account", "") self.destination_kms: str = self.config.active_config.get("destination_kms", "") - self.destination_role: str = self.config.active_config.get( - "destination_role", "" - ) + self.destination_role: str = self.config.active_config.get("destination_role", "") self.subnet_id: str = self.config.active_config.get("subnet_id", "") - self.private_ip_action: str = self.config.active_config.get( - "private_ip_action", "" - ) + self.private_ip_action: str = self.config.active_config.get("private_ip_action", "") if self.subnet_id and not self.private_ip_action: self.private_ip_action = "CREATE_NEW" - self.security_group_id: str = self.config.active_config.get( - "security_group_id", "" - ) - self.target_machines: List[str] = self.config.active_config.get( - "machines", "" - ).split(",") - self.target_instance_types: List[str] = self.config.active_config.get( - "instance_types", "" - ).split(",") + self.security_group_id: str = self.config.active_config.get("security_group_id", "") + self.target_machines: List[str] = self.config.active_config.get("machines", "").split(",") + self.target_instance_types: List[str] = self.config.active_config.get("instance_types", "").split(",") if len(self.target_machines) == len(self.target_instance_types): - self.target_instances: Dict[str, str] = dict( - zip(self.target_machines, self.target_instance_types) - ) + self.target_instances: Dict[str, str] = dict(zip(self.target_machines, self.target_instance_types)) else: print( "WARNING: Misconfiguration of CLOUDENDURE_INSTANCE_TYPES and CLOUDENDURE_MACHINES. These should be the same length!" @@ -104,9 +83,7 @@ def __init__( self.max_lag_ttl: int = self.config.active_config.get("max_lag_ttl", 90) if not self.is_authenticated: - print( - "Failed to authenticate with CloudEndure! Please check your credentials and try again!" - ) + print("Failed to authenticate with CloudEndure! Please check your credentials and try again!") def __repr__(self) -> str: """Provide the representation for the CloudEndure object. @@ -131,9 +108,7 @@ def _get_role_credentials(self, name: str, role: str) -> Dict[str, Any]: _sts_client: boto_client = boto3.client("sts") print(f"Assuming role: {role}") - assumed_role: Dict[str, Any] = _sts_client.assume_role( - RoleArn=self.destination_role, RoleSessionName=name - ) + assumed_role: Dict[str, Any] = _sts_client.assume_role(RoleArn=self.destination_role, RoleSessionName=name) return assumed_role.get("Credentials", {}) def get_project_id(self, project_name: str = "") -> str: @@ -158,9 +133,7 @@ def get_project_id(self, project_name: str = "") -> str: return self.project_id if not self.project_name and not project_name: - print( - "No project name provided! Please add a project_name to the configuration and re-run!" - ) + print("No project name provided! 
Please add a project_name to the configuration and re-run!") raise CloudEndureMisconfigured() if not project_name: @@ -176,8 +149,7 @@ def get_project_id(self, project_name: str = "") -> str: # Get Project ID projects: List[Any] = json.loads(projects_result.text).get("items", []) found_project: Dict[Any, Any] = next( - (item for item in projects if item.get("name", "NONE") == project_name), - {}, + (item for item in projects if item.get("name", "NONE") == project_name), {}, ) project_id: str = found_project.get("id", "") except Exception as e: @@ -196,9 +168,7 @@ def get_cloud(self, cloud_type: str = "") -> str: return cloud.get("id", "") return "" - def create_cloud_credentials( - self, access_key: str = "", secret_key: str = "" - ) -> str: + def create_cloud_credentials(self, access_key: str = "", secret_key: str = "") -> str: """Create a new CloudEndure project. Args: @@ -216,9 +186,7 @@ def create_cloud_credentials( "cloudId": self.get_cloud(), } - cloud_cred_result: Response = self.api.api_call( - "cloudCredentials", method="post", data=_project - ) + cloud_cred_result: Response = self.api.api_call("cloudCredentials", method="post", data=_project) if cloud_cred_result.status_code != 201: print( f"Failed to create the new cloud credentials: ({access_key}): " @@ -259,23 +227,17 @@ def create_repl_config(self, region: str = "", cloud_cred_id: str = ""): # "volumeEncryptionKey": "" } - credentials_response: Response = self.api.api_call( - f"cloudCredentials/{cloud_cred_id}/regions" - ) + credentials_response: Response = self.api.api_call(f"cloudCredentials/{cloud_cred_id}/regions") for _region in json.loads(credentials_response.content).get("items", []): if _region["name"] == regions.get(region, "N/A"): payload["region"] = _region["id"] print(payload) repl_result: Response = self.api.api_call( - f"projects/{self.project_id}/replicationConfigurations", - method="post", - data=payload, + f"projects/{self.project_id}/replicationConfigurations", method="post", data=payload, ) if repl_result.status_code != 201: - print( - f"Failed to create replication configuration {repl_result.status_code} {repl_result.content}" - ) + print(f"Failed to create replication configuration {repl_result.status_code} {repl_result.content}") return "" print("Replication configuration was created successfully") return json.loads(repl_result.content).get("id", "") @@ -290,12 +252,8 @@ def get_repl_configs(self) -> List[Any]: list of dict: The CloudEndure replication configuration dictionary mapping. 
""" - print( - f"Fetching Replication Configuration - Project Name: ({self.project_name})" - ) - repl_config_results: Response = self.api.api_call( - f"projects/{self.project_id}/replicationConfigurations" - ) + print(f"Fetching Replication Configuration - Project Name: ({self.project_name})") + repl_config_results: Response = self.api.api_call(f"projects/{self.project_id}/replicationConfigurations") if repl_config_results.status_code != 200: print( @@ -306,9 +264,7 @@ def get_repl_configs(self) -> List[Any]: return [] repl_configs = json.loads(repl_config_results.content).get("items", []) - print( - f"Successfully fetched replication configurations for project: {self.project_name}" - ) + print(f"Successfully fetched replication configurations for project: {self.project_name}") return repl_configs def create_project(self, project_name: str) -> str: @@ -335,9 +291,7 @@ def create_project(self, project_name: str) -> str: print(license_id) project["licensesIDs"].append(license_id) - projects_result: Response = self.api.api_call( - "projects", method="post", data=project - ) + projects_result: Response = self.api.api_call("projects", method="post", data=project) if projects_result.status_code != 201: print( f"Failed to create the new project ({self.project_name}): " @@ -360,9 +314,7 @@ def update_project(self, project_data: Dict[str, Any] = None) -> bool: """ print(f"Updating Project - Name: ({self.project_name})") - projects_result: Response = self.api.api_call( - f"projects/{self.project_id}", method="patch", data=project_data - ) + projects_result: Response = self.api.api_call(f"projects/{self.project_id}", method="patch", data=project_data) if projects_result.status_code != 200: print( @@ -376,9 +328,7 @@ def update_project(self, project_data: Dict[str, Any] = None) -> bool: def check(self) -> bool: """Check the status of machines in the provided project.""" - print( - f"Checking Project - Name: ({self.project_name}) - Dry Run: ({self.dry_run})" - ) + print(f"Checking Project - Name: ({self.project_name}) - Dry Run: ({self.dry_run})") projects_response: Response = self.api.api_call("projects") @@ -387,49 +337,35 @@ def check(self) -> bool: raise CloudEndureHTTPException("Failed to fetch the CloudEndure Project!") machine_status: int = 0 - machines_response: Response = self.api.api_call( - f"projects/{self.project_id}/machines" - ) + machines_response: Response = self.api.api_call(f"projects/{self.project_id}/machines") for _machine in self.target_machines: machine_exist: bool = False for machine in json.loads(machines_response.text).get("items", []): source_props: Dict[str, Any] = machine.get("sourceProperties", {}) - ref_name: str = source_props.get("name") or source_props.get( + ref_name: str = source_props.get("name") or source_props.get("machineCloudId", "NONE") + if _machine == source_props.get("name", "NONE") or _machine == source_props.get( "machineCloudId", "NONE" - ) - if _machine == source_props.get( - "name", "NONE" - ) or _machine == source_props.get("machineCloudId", "NONE"): + ): machine_exist = True if "lastConsistencyDateTime" not in machine["replicationInfo"]: - print( - f"{ref_name} replication into the migration account in progress!" 
- ) + print(f"{ref_name} replication into the migration account in progress!") else: replica: str = machine.get("replica") if replica: machine_status += 1 - print( - f"{ref_name} has been launched in the migration account: {replica}" - ) + print(f"{ref_name} has been launched in the migration account: {replica}") else: - print( - f"{ref_name} has not been launched or is preparing launch in the migration account" - ) + print(f"{ref_name} has not been launched or is preparing launch in the migration account") if not machine_exist: print(f"ERROR: Machine: {_machine} does not exist!") if machine_status == len(self.target_machines): - print( - "All machines specified in CLOUDENDURE_MACHINES have been launched in the migration account" - ) + print("All machines specified in CLOUDENDURE_MACHINES have been launched in the migration account") else: - print( - "Some machines have not yet completed launching in the migration account" - ) + print("Some machines have not yet completed launching in the migration account") return True def update_encryption_key(self, kms_id: str) -> bool: @@ -447,20 +383,14 @@ def update_encryption_key(self, kms_id: str) -> bool: bool: Whether or not the encryption key was updated. """ - print( - f"Updating Encryption Key - Name: ({self.project_name}) - KMS ID: ({kms_id}) - Dry Run: ({self.dry_run})" - ) + print(f"Updating Encryption Key - Name: ({self.project_name}) - KMS ID: ({kms_id}) - Dry Run: ({self.dry_run})") - machines_response: Response = self.api.api_call( - f"projects/{self.project_id}/machines" - ) + machines_response: Response = self.api.api_call(f"projects/{self.project_id}/machines") for machine in json.loads(machines_response.text).get("items", []): source_props: Dict[str, Any] = machine.get("sourceProperties", {}) name: str = source_props.get("name") machine_id: str = machine.get("id") - replication_config: Dict[str, Any] = machine.get( - "replicationConfiguration", {} - ) + replication_config: Dict[str, Any] = machine.get("replicationConfiguration", {}) if replication_config.get("volumeEncryptionKey") == kms_id: print(f"Key matches {name}") @@ -473,14 +403,10 @@ def update_encryption_key(self, kms_id: str) -> bool: replication_config["volumeEncryptionKey"] = kms_id endpoint: str = f"projects/{self.project_id}/machines/{machine_id}" print(f"sending to {endpoint}") - result: Response = self.api.api_call( - endpoint, method="patch", data=json.dumps(machine) - ) + result: Response = self.api.api_call(endpoint, method="patch", data=json.dumps(machine)) if result.status_code != 200: - print( - f"Key update failure encountered for machine: {name} - {result.status_code}\n{result.json()}" - ) + print(f"Key update failure encountered for machine: {name} - {result.status_code}\n{result.json()}") else: print(f"Key updated for or machine: {name}") @@ -495,18 +421,14 @@ def check_licenses(self) -> Dict[str, Any]: expirationday: timedelta = datetime.timedelta(days=90) expirationwarn: timedelta = datetime.timedelta(days=60) machine_data_dict: Dict[str, Any] = {} - machines_response: Response = self.api.api_call( - f"projects/{self.project_id}/machines" - ) + machines_response: Response = self.api.api_call(f"projects/{self.project_id}/machines") for machine in json.loads(machines_response.text).get("items", []): source_props: Dict[str, Any] = machine.get("sourceProperties", {}) machine_id: str = machine.get("id") machine_name: str = source_props.get("name") license_data: Dict[str, Any] = machine.get("license", {}) license_use: str = 
license_data.get("startOfUseDateTime") - license_date: datetime = datetime.datetime.strptime( - license_use, "%Y-%m-%dT%H:%M:%S.%f%z" - ) + license_date: datetime = datetime.datetime.strptime(license_use, "%Y-%m-%dT%H:%M:%S.%f%z") delta: timedelta = now - license_date if expirationday < delta: response_dict[machine_name] = { @@ -525,31 +447,22 @@ def check_licenses(self) -> Dict[str, Any]: def update_blueprint(self) -> bool: """Update the blueprint associated with the specified machines.""" - print( - f"Updating CloudEndure Blueprints - Name: ({self.project_name}) - Dry Run: ({self.dry_run})" - ) + print(f"Updating CloudEndure Blueprints - Name: ({self.project_name}) - Dry Run: ({self.dry_run})") machine_data_dict: Dict[str, Any] = {} - machines_response: Response = self.api.api_call( - f"projects/{self.project_id}/machines" - ) + machines_response: Response = self.api.api_call(f"projects/{self.project_id}/machines") for machine in json.loads(machines_response.text).get("items", []): source_props: Dict[str, Any] = machine.get("sourceProperties", {}) machine_id: str = machine.get("id") machine_name: str = source_props.get("name") - if ( - machine_name in self.target_machines - or machine_name.upper() in self.target_machines - ): + if machine_name in self.target_machines or machine_name.upper() in self.target_machines: machine_data_dict[machine_id] = machine_name if not machine_data_dict: print("No Machines Found!") return False try: - blueprints_response = self.api.api_call( - f"projects/{self.project_id}/blueprints" - ) + blueprints_response = self.api.api_call(f"projects/{self.project_id}/blueprints") for blueprint in json.loads(blueprints_response.text).get("items", []): _machine_id: str = blueprint.get("machineId", "") _machine_name: str = machine_data_dict.get(_machine_id, "") @@ -562,10 +475,7 @@ def update_blueprint(self) -> bool: new_disks = [] for disk in blueprint["disks"]: new_disks.append( - { - "type": self.config.active_config.get("disk_type", "SSD"), - "name": disk.get("name", ""), - } + {"type": self.config.active_config.get("disk_type", "SSD"), "name": disk.get("name", ""),} ) blueprint["disks"] = new_disks @@ -583,27 +493,18 @@ def update_blueprint(self) -> bool: # Update machine tags blueprint["tags"] = [ - { - "key": "CloneStatus", - "value": self.config.active_config.get( - "clone_status", "NOT_STARTED" - ), - }, + {"key": "CloneStatus", "value": self.config.active_config.get("clone_status", "NOT_STARTED"),}, {"key": "MigrationWave", "value": self.migration_wave}, {"key": "DestinationAccount", "value": self.destination_account}, {"key": "DestinationKMS", "value": self.destination_kms}, {"key": "DestinationRole", "value": self.destination_role}, ] - blueprint["publicIPAction"] = self.config.active_config.get( - "public_ip", "DONT_ALLOCATE" - ) + blueprint["publicIPAction"] = self.config.active_config.get("public_ip", "DONT_ALLOCATE") if self.dry_run: print("This is a dry run! 
Not updating blueprints!") return True - result: Response = self.api.api_call( - _endpoint, method="patch", data=json.dumps(blueprint) - ) + result: Response = self.api.api_call(_endpoint, method="patch", data=json.dumps(blueprint)) if result.status_code != 200: print( @@ -622,17 +523,13 @@ def launch(self) -> Dict[str, Any]: """Launch the test target instances.""" response_dict: Dict[str, Any] = {} - print( - f"Launching Project - Name: ({self.project_name}) - Dry Run: ({self.dry_run})" - ) + print(f"Launching Project - Name: ({self.project_name}) - Dry Run: ({self.dry_run})") if self.dry_run: print("This is a dry run! Not launching any machines!") return response_dict - machines_response: Response = self.api.api_call( - f"projects/{self.project_id}/machines" - ) + machines_response: Response = self.api.api_call(f"projects/{self.project_id}/machines") for _machine in self.target_machines: for machine in json.loads(machines_response.text).get("items", []): source_props: Dict[str, Any] = machine.get("sourceProperties", {}) @@ -640,9 +537,7 @@ def launch(self) -> Dict[str, Any]: if _machine == source_props.get("name", "NONE"): if machine.get("replica"): print("Target machine already launched") - self.event_handler.add_event( - Event.EVENT_ALREADY_LAUNCHED, machine_name=_machine - ) + self.event_handler.add_event(Event.EVENT_ALREADY_LAUNCHED, machine_name=_machine) continue machine_data = { "items": [{"machineId": machine["id"]}], @@ -651,60 +546,38 @@ def launch(self) -> Dict[str, Any]: if machine_data: result: Response = self.api.api_call( - f"projects/{self.project_id}/launchMachines", - method="post", - data=json.dumps(machine_data), + f"projects/{self.project_id}/launchMachines", method="post", data=json.dumps(machine_data), ) if result.status_code == 202: - response_dict["original_id"] = source_props.get( - "machineCloudId", "NONE" - ) + response_dict["original_id"] = source_props.get("machineCloudId", "NONE") response_dict.update(json.loads(result.text)) print("Test Job created for machine ", _machine) - self.event_handler.add_event( - Event.EVENT_SUCCESSFULLY_LAUNCHED, machine_name=_machine - ) + self.event_handler.add_event(Event.EVENT_SUCCESSFULLY_LAUNCHED, machine_name=_machine) elif result.status_code == 409: print(f"ERROR: ({_machine}) is currently in progress!") - self.event_handler.add_event( - Event.EVENT_IN_PROGRESS, machine_name=_machine - ) + self.event_handler.add_event(Event.EVENT_IN_PROGRESS, machine_name=_machine) elif result.status_code == 402: print("ERROR: Project license has expired!") - self.event_handler.add_event( - Event.EVENT_EXPIRED, machine_name=_machine - ) + self.event_handler.add_event(Event.EVENT_EXPIRED, machine_name=_machine) else: print("ERROR: Launch target machine failed!") - self.event_handler.add_event( - Event.EVENT_FAILED, machine_name=_machine - ) + self.event_handler.add_event(Event.EVENT_FAILED, machine_name=_machine) else: - print( - f"Machine: ({source_props['name']}) - Not a machine we want to launch..." 
- ) - self.event_handler.add_event( - Event.EVENT_IGNORED, machine_name=_machine - ) + print(f"Machine: ({source_props['name']}) - Not a machine we want to launch...") + self.event_handler.add_event(Event.EVENT_IGNORED, machine_name=_machine) return response_dict def status(self) -> bool: """Get the status of machines in the current wave.""" - print( - f"Getting Status of Project - Name: ({self.project_name}) - Dry Run: ({self.dry_run})" - ) + print(f"Getting Status of Project - Name: ({self.project_name}) - Dry Run: ({self.dry_run})") machine_status: int = 0 - machines_response: Response = self.api.api_call( - f"projects/{self.project_name}/machines" - ) + machines_response: Response = self.api.api_call(f"projects/{self.project_name}/machines") for _machine in self.target_machines: machine_exist: bool = False for machine in json.loads(machines_response.text).get("items", []): source_props: Dict[str, Any] = machine.get("sourceProperties", {}) - ref_name: str = source_props.get("name") or source_props.get( - "machineCloudId", "NONE" - ) + ref_name: str = source_props.get("name") or source_props.get("machineCloudId", "NONE") if ref_name == source_props.get("name", "NONE"): machine_exist = True # Check if replication is done @@ -714,24 +587,14 @@ def status(self) -> bool: else: # Check the replication lag between source and CE destination. last_consistent_dt_1: int = int( - machine.get("replicationInfo", {}).get( - "lastConsistencyDateTime", "" - )[11:13] + machine.get("replicationInfo", {}).get("lastConsistencyDateTime", "")[11:13] ) last_consistent_dt_2: int = int( - machine.get("replicationInfo", {}).get( - "lastConsistencyDateTime", "" - )[14:16] - ) - datetime_1: int = int( - datetime.datetime.utcnow().isoformat()[11:13] - ) - datetime_2: int = int( - datetime.datetime.utcnow().isoformat()[14:16] - ) - result: int = (datetime_1 - last_consistent_dt_1) * 60 + ( - datetime_2 - last_consistent_dt_2 + machine.get("replicationInfo", {}).get("lastConsistencyDateTime", "")[14:16] ) + datetime_1: int = int(datetime.datetime.utcnow().isoformat()[11:13]) + datetime_2: int = int(datetime.datetime.utcnow().isoformat()[14:16]) + result: int = (datetime_1 - last_consistent_dt_1) * 60 + (datetime_2 - last_consistent_dt_2) if result > self.max_lag_ttl: print( f"{ref_name} is currently lagging greater than {self.max_lag_ttl} minutes - ({result})" @@ -742,18 +605,14 @@ def status(self) -> bool: machine_status += 1 else: # Check whether or not the target machine has already been tested. - _m_life_cycle: Dict[str, Any] = machine.get( - "lifeCycle", {} - ) + _m_life_cycle: Dict[str, Any] = machine.get("lifeCycle", {}) if ( "lastTestLaunchDateTime" not in _m_life_cycle and "lastCutoverDateTime" not in _m_life_cycle ): machine_status += 1 else: - print( - f"{ref_name} has already been tested - you can create AMIs now!" 
- ) + print(f"{ref_name} has already been tested - you can create AMIs now!") return False if not machine_exist: print(f"ERROR: Machine: {_machine} does not exist!") @@ -768,9 +627,7 @@ def status(self) -> bool: def execute(self) -> bool: """Start the migration project my checking and launching the migration wave.""" - print( - f"Executing Project - Name: ({self.project_name}) - Dry Run: ({self.dry_run})" - ) + print(f"Executing Project - Name: ({self.project_name}) - Dry Run: ({self.dry_run})") projects_result: Response = self.api.api_call("projects") if projects_result.status_code != 200: @@ -779,9 +636,7 @@ def execute(self) -> bool: try: # Get Machine List - machines_response: Response = self.api.api_call( - f"projects/{self.project_id}/machines" - ) + machines_response: Response = self.api.api_call(f"projects/{self.project_id}/machines") if "sourceProperties" not in machines_response.text: print("Failed to fetch the machines!") return False @@ -793,9 +648,7 @@ def execute(self) -> bool: print("No source properties found!") continue - ref_name = source_props.get("name") or source_props.get( - "machineCloudId", "NONE" - ) + ref_name = source_props.get("name") or source_props.get("machineCloudId", "NONE") _machine_id: str = source_props.get("id", "") print(f"Machine name: {ref_name}, Machine ID: {_machine_id}") machine_data_dict[machine["id"]] = ref_name @@ -820,18 +673,14 @@ def replication(self, action: str, machine_ids: str = "") -> bool: if not _machines: _machines = self.target_machines - print( - f"No machines provided. Defaulting to project machines: ({_machines})" - ) + print(f"No machines provided. Defaulting to project machines: ({_machines})") replication_data: Dict[str, Any] = {"machineId": _machines} if action not in ["pause", "stop", "start"]: return False replication_results: Response = self.api.api_call( - f"projects/{self.project_id}/{action}Replication", - method="post", - data=replication_data, + f"projects/{self.project_id}/{action}Replication", method="post", data=replication_data, ) if replication_results.status_code != 200: @@ -873,9 +722,7 @@ def share_image(self, image_id: str, image_name: str = "CloudEndureImage") -> bo try: snapshot.modify_attribute( Attribute="createVolumePermission", - CreateVolumePermission={ - "Add": [{"UserId": self.destination_account}] - }, + CreateVolumePermission={"Add": [{"UserId": self.destination_account}]}, OperationType="add", ) except Exception as e: @@ -903,9 +750,7 @@ def create_ami(self) -> Dict[str, str]: _ec2_client: boto_client = boto3.client("ec2", AWS_REGION) # Create an AMI from the migrated instance - image_creation_time: str = datetime.datetime.utcnow().strftime( - "%Y%m%d%H%M%S" - ) + image_creation_time: str = datetime.datetime.utcnow().strftime("%Y%m%d%H%M%S") instances: Dict[str, Any] = _ec2_client.describe_instances( Filters=[ {"Name": "tag:MigrationWave", "Values": [self.migration_wave]}, @@ -914,9 +759,7 @@ def create_ami(self) -> Dict[str, str]: ) if not instances or not instances.get("Reservations", []): - print( - f"No instances or reservations found for migration wave: {self.migration_wave}" - ) + print(f"No instances or reservations found for migration wave: {self.migration_wave}") return amis for reservation in instances.get("Reservations", []): @@ -928,14 +771,10 @@ def create_ami(self) -> Dict[str, str]: Description=f"{self.project_name} - {self.project_id} - {instance_id} - {image_creation_time}", NoReboot=True, ) - _filters: List[Any] = [ - {"Name": "resource-id", "Values": [instance_id]} - ] + 
_filters: List[Any] = [{"Name": "resource-id", "Values": [instance_id]}] # Tag the newly created AMI by getting the tags of the migrated instance to copy to the AMI. - ec2_tags: Dict[str, Any] = _ec2_client.describe_tags( - Filters=_filters - ) + ec2_tags: Dict[str, Any] = _ec2_client.describe_tags(Filters=_filters) name: str = instance_id for tag in ec2_tags["Tags"]: @@ -943,17 +782,13 @@ def create_ami(self) -> Dict[str, str]: name = tag["Value"] _ec2_client.create_tags( - Resources=[ec2_image["ImageId"]], - Tags=[{"Key": tag["Key"], "Value": tag["Value"]}], + Resources=[ec2_image["ImageId"]], Tags=[{"Key": tag["Key"], "Value": tag["Value"]}], ) _ec2_client.create_tags( - Resources=[instance_id], - Tags=[{"Key": "CloneStatus", "Value": "IMAGE_CREATED"}], - ) - _ec2_client.delete_tags( - Resources=[ec2_image["ImageId"]], Tags=[{"Key": "CloneStatus"}] + Resources=[instance_id], Tags=[{"Key": "CloneStatus", "Value": "IMAGE_CREATED"}], ) + _ec2_client.delete_tags(Resources=[ec2_image["ImageId"]], Tags=[{"Key": "CloneStatus"}]) amis[name] = ec2_image["ImageId"] print(f"Instance ID: ({instance_id}) - AMI ID: ({ec2_image})") @@ -1003,9 +838,7 @@ def split_image(self, image_id: str) -> Dict[str, Any]: dict: The mapping of AWS EBS block devices. """ - print( - f"Loading EC2 resource for region: {AWS_REGION} using role: {self.destination_role}" - ) + print(f"Loading EC2 resource for region: {AWS_REGION} using role: {self.destination_role}") credentials = self._get_role_credentials("SplitImage", self.destination_role) _ec2_res: boto_client = boto3.resource( @@ -1049,8 +882,7 @@ def split_image(self, image_id: str) -> Dict[str, Any]: for drive in drives: print(drives[drive]) _ec2_res.create_tags( - Resources=[root_ami], - Tags=[{"Key": f"Drive-{drive}", "Value": json.dumps(drives[drive])}], + Resources=[root_ami], Tags=[{"Key": f"Drive-{drive}", "Value": json.dumps(drives[drive])}], ) # remove the old image @@ -1133,9 +965,7 @@ def gen_terraform( "volume_type": drive_info.get("VolumeType", "gp2"), } - drive_template = TerraformTemplate.VOLUME_TEMPLATE.format( - **drive_template_data - ) + drive_template = TerraformTemplate.VOLUME_TEMPLATE.format(**drive_template_data) template = template + drive_template return template @@ -1146,31 +976,23 @@ def terminate(self) -> bool: bool: Whether cleanup was successful. """ - machines_response: Response = self.api.api_call( - f"projects/{self.project_id}/machines" - ) + machines_response: Response = self.api.api_call(f"projects/{self.project_id}/machines") success = True for _machine in self.target_machines: for machine in json.loads(machines_response.text).get("items", []): source_props: Dict[str, Any] = machine.get("sourceProperties", {}) - ref_name: str = source_props.get("name") or source_props.get( + ref_name: str = source_props.get("name") or source_props.get("machineCloudId", "NONE") + if _machine == source_props.get("name", "NONE") or _machine == source_props.get( "machineCloudId", "NONE" - ) - if _machine == source_props.get( - "name", "NONE" - ) or _machine == source_props.get("machineCloudId", "NONE"): + ): replica: str = machine.get("replica") if replica: - print( - f"{ref_name} has a launched machine: {replica}. Terminating." - ) + print(f"{ref_name} has a launched machine: {replica}. 
Terminating.") data_dict: Dict[str, Any] = {} data_dict["replicaIDs"] = [replica] delete_response: Response = self.api.api_call( - path=f"projects/{self.project_id}/replicas", - method="delete", - data=json.dumps(data_dict), + path=f"projects/{self.project_id}/replicas", method="delete", data=json.dumps(data_dict), ) if delete_response.status_code != 202: print( @@ -1179,9 +1001,7 @@ def terminate(self) -> bool: ) success = False else: - print( - f"Terminated {ref_name}\n{json.loads(delete_response.text)}" - ) + print(f"Terminated {ref_name}\n{json.loads(delete_response.text)}") else: print(f"{ref_name} does not have a launched machine") success = False diff --git a/cloudendure/config.py b/cloudendure/config.py index 1ea2faf0c..2547d3940 100644 --- a/cloudendure/config.py +++ b/cloudendure/config.py @@ -43,14 +43,10 @@ class CloudEndureConfig: "instance_types": "", } - def __init__( - self, username: str = "", password: str = "", token: str = "", *args, **kwargs - ) -> None: + def __init__(self, username: str = "", password: str = "", token: str = "", *args, **kwargs) -> None: """Initialize the Environment.""" logger.info("Initializing the CloudEndure Configuration") - _config_path: str = os.environ.get( - "CLOUDENDURE_CONFIG_PATH", "~/.cloudendure.yaml" - ) + _config_path: str = os.environ.get("CLOUDENDURE_CONFIG_PATH", "~/.cloudendure.yaml") if _config_path.startswith("~"): self.config_path = os.path.expanduser(_config_path) else: @@ -61,9 +57,7 @@ def __init__( _config: PosixPath = Path(self.config_path) if not _config.exists(): - print( - f"No CloudEndure YAML configuration found! Creating it at: ({self.config_path})" - ) + print(f"No CloudEndure YAML configuration found! Creating it at: ({self.config_path})") self.write_yaml_config(config=self.BASE_CONFIG) self.update_config() @@ -99,8 +93,7 @@ def write_yaml_config(self, config: Dict[str, Any]) -> bool: return True except Exception as e: logger.error( - "Exception encountered while writing the CloudEndure YAML configuration file - (%s)", - e, + "Exception encountered while writing the CloudEndure YAML configuration file - (%s)", e, ) return False @@ -132,9 +125,7 @@ def update_config(self) -> None: """Update the configuration.""" self.yaml_config_contents: Dict[str, Any] = self.read_yaml_config() self.env_config = self.get_env_vars() - self.active_config = self.merge_config_dicts( - [self.yaml_config_contents, self.env_config, self.cli] - ) + self.active_config = self.merge_config_dicts([self.yaml_config_contents, self.env_config, self.cli]) def update_token(self, token: str) -> bool: """Update the CloudEndure token. 
diff --git a/cloudendure/events.py b/cloudendure/events.py index efbd4a8f5..101f5fda2 100644 --- a/cloudendure/events.py +++ b/cloudendure/events.py @@ -61,9 +61,7 @@ class Event: EVENT_ALREADY_LAUNCHED, ] - EVENT_TYPES: List[ - Tuple[str, str] - ] = ERRORRED_EVENT_TYPES + SUCCESSFUL_EVENT_TYPES + WARNED_EVENT_TYPES + EVENT_TYPES: List[Tuple[str, str]] = ERRORRED_EVENT_TYPES + SUCCESSFUL_EVENT_TYPES + WARNED_EVENT_TYPES def __init__(self, event_type, machine_name: str = "NA", **kwargs) -> None: """Initialize the Event.""" diff --git a/cloudendure/models.py b/cloudendure/models.py index 69ec06a4f..385f1903f 100644 --- a/cloudendure/models.py +++ b/cloudendure/models.py @@ -154,20 +154,14 @@ class Machine(CloudEndureModel): # Standard Schema "sourceProperties": { "name": "", - "installedApplications": { - "items": [{"applicationName": ""}], - "lastUpdatedDateTime": "", - }, + "installedApplications": {"items": [{"applicationName": ""}], "lastUpdatedDateTime": "",}, "disks": [{"isProtected": False, "name": "", "size": 0}], "machineCloudState": "", "publicIps": [], "memory": 0, "os": "", "cpu": [{"cores": 0, "modelName": ""}], - "runningServices": { - "items": [{"serviceName": ""}], - "lastUpdatedDateTime": "", - }, + "runningServices": {"items": [{"serviceName": ""}], "lastUpdatedDateTime": "",}, "machineCloudId": "", }, "replicationInfo": { @@ -176,12 +170,7 @@ class Machine(CloudEndureModel): "rescannedStorageBytes": 0, "backloggedStorageBytes": 0, "initiationStates": { - "items": [ - { - "steps": [{"status": "", "message": "", "name": ""}], - "startDateTime": "", - } - ], + "items": [{"steps": [{"status": "", "message": "", "name": ""}], "startDateTime": "",}], "estimatedNextAttemptDateTime": "", }, "replicatedStorageBytes": 0, diff --git a/cloudendure/templates.py b/cloudendure/templates.py index cfce736a8..70a906949 100644 --- a/cloudendure/templates.py +++ b/cloudendure/templates.py @@ -21,9 +21,7 @@ def run( print("Creating the cookiecutter repository subdirectory for: ", k) self.create_project(package_path=cookiecutter_path, context=v) - def create_project( - self, package_path: str, context: Dict[Any, Any] = None, no_input: bool = True - ) -> bool: + def create_project(self, package_path: str, context: Dict[Any, Any] = None, no_input: bool = True) -> bool: """Create a cookiecutter project with the provided details. Args: diff --git a/lambda/copy_ami.py b/lambda/copy_ami.py index 568eacb58..bff69c630 100644 --- a/lambda/copy_ami.py +++ b/lambda/copy_ami.py @@ -28,27 +28,20 @@ SESSION_NAME: str = os.environ.get("CLOUDENDURE_SESSION_NAME", "CloudEndureMigration") -def assume_role( - sts_role_arn: str = "", session_name: str = SESSION_NAME -) -> Dict[str, str]: +def assume_role(sts_role_arn: str = "", session_name: str = SESSION_NAME) -> Dict[str, str]: sts: boto_client = boto3.client("sts") try: - credentials: Dict[str, str] = sts.assume_role( - RoleArn=sts_role_arn, RoleSessionName=session_name - ).get("Credentials", {}) + credentials: Dict[str, str] = sts.assume_role(RoleArn=sts_role_arn, RoleSessionName=session_name).get( + "Credentials", {} + ) except Exception as e: logger.error( - "%s encountered while attempting to assume the Role ARN: (%s) during (%s)", - e, - sts_role_arn, - session_name, + "%s encountered while attempting to assume the Role ARN: (%s) during (%s)", e, sts_role_arn, session_name, ) if not credentials: - logger.error( - "Unable to assume role via STS! Please check permissions and try again." - ) + logger.error("Unable to assume role via STS! 
Please check permissions and try again.") return credentials @@ -110,9 +103,7 @@ def share_image(image_name: str = "CloudEndureImage") -> bool: # Copy the shared AMI to dest region try: - dest_ec2.copy_image( - Name=image_name, SourceImageId=image.id, SourceRegion=SRC_REGION - ) + dest_ec2.copy_image(Name=image_name, SourceImageId=image.id, SourceRegion=SRC_REGION) except Exception as e: logger.error(e) return False diff --git a/lambda/handler.py b/lambda/handler.py index 9b8b1ca5b..666199f40 100644 --- a/lambda/handler.py +++ b/lambda/handler.py @@ -95,8 +95,7 @@ def create_ami(project_id: str, instance_id: str) -> bool: logger.info(ec2_tags) for tag in ec2_tags["Tags"]: _ec2_client.create_tags( - Resources=[ec2_image["ImageId"]], - Tags=[{"Key": tag["Key"], "Value": tag["Value"]}], + Resources=[ec2_image["ImageId"]], Tags=[{"Key": tag["Key"], "Value": tag["Value"]}], ) send_sqs_message(ec2_image) @@ -135,13 +134,9 @@ def lambda_handler(event: Dict[str, Any], context: Dict[str, Any]) -> bool: project_id: str = json_sns_message.get("projectId", "") if json_sns_message.get("Pass", "NA") != "True": - raise InvalidPayload( - f"{instance_id} did not pass post migration testing! Not creating an AMI." - ) + raise InvalidPayload(f"{instance_id} did not pass post migration testing! Not creating an AMI.") else: - logger.info( - "%s passed post migration testing. Creating an AMI." % (instance_id) - ) + logger.info("%s passed post migration testing. Creating an AMI." % (instance_id)) create_ami(project_id, instance_id) except ClientError as e: logger.error(e.response) diff --git a/pyproject.toml b/pyproject.toml index a9b22e949..4471fd82b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -168,7 +168,24 @@ DISABLE_ENDING_COMMA_HEURISTIC = true [tool.black] line-length = 120 -target-version = "py38" +target-version = ["py38"] +exclude = ''' +( + /( + \.eggs + | \.git + | \.hg + | \.mypy_cache + | \.tox + | \.venv + | _build + | buck-out + | build + | dist + )/ + | cloudendure_api/ +) +''' [tool.pylint] line_length = 120 diff --git a/step/lambdas/copy_image.py b/step/lambdas/copy_image.py index 09a6b45be..d235da0f0 100644 --- a/step/lambdas/copy_image.py +++ b/step/lambdas/copy_image.py @@ -33,9 +33,7 @@ def lambda_handler(event: Dict[str, Any], context: Any) -> str: sts_client = boto3.client("sts") print(f"Assuming role: {role}") - assumed_role: Dict[str, Any] = sts_client.assume_role( - RoleArn=role, RoleSessionName="CopyImageLambda" - ) + assumed_role: Dict[str, Any] = sts_client.assume_role(RoleArn=role, RoleSessionName="CopyImageLambda") credentials = assumed_role.get("Credentials") @@ -59,7 +57,5 @@ def lambda_handler(event: Dict[str, Any], context: Any) -> str: print(e) return "" - MigrationStateHandler().update_state( - state="IMAGE_COPYING", machine_name=instance_name - ) + MigrationStateHandler().update_state(state="IMAGE_COPYING", machine_name=instance_name) return new_image.get("ImageId", "") diff --git a/step/lambdas/create_image.py b/step/lambdas/create_image.py index 346f9f456..732310ec0 100644 --- a/step/lambdas/create_image.py +++ b/step/lambdas/create_image.py @@ -33,8 +33,7 @@ def lambda_handler(event: Dict[str, Any], context: Any) -> str: instance = ec2_resource.Instance(instance_id) ec2_image = instance.create_image( - Name=f"{instance_id}-{image_creation_time}", - Description=f"Created image for {instance_id}", + Name=f"{instance_id}-{image_creation_time}", Description=f"Created image for {instance_id}", ) for tag in instance.tags: diff --git a/step/lambdas/find_instance.py 
b/step/lambdas/find_instance.py index 6480b1d0c..60bc7c13b 100644 --- a/step/lambdas/find_instance.py +++ b/step/lambdas/find_instance.py @@ -72,7 +72,5 @@ def lambda_handler(event: Dict[str, Any], context: Any) -> str: print(e) event_dict["instance_id"] = "not-found" - MigrationStateHandler().update_state( - state="INSTANCE_LAUNCHED", machine_name=event_dict.get("name") - ) + MigrationStateHandler().update_state(state="INSTANCE_LAUNCHED", machine_name=event_dict.get("name")) return event_dict diff --git a/step/lambdas/get_copy_status.py b/step/lambdas/get_copy_status.py index 9773601ec..23aafcfa9 100644 --- a/step/lambdas/get_copy_status.py +++ b/step/lambdas/get_copy_status.py @@ -34,9 +34,7 @@ def lambda_handler(event: Dict[str, Any], context: Any) -> str: sts_client = boto3.client("sts") print(f"Assuming role: {role}") - assumed_role: Dict[str, Any] = sts_client.assume_role( - RoleArn=role, RoleSessionName="GetCopyStatusLambda" - ) + assumed_role: Dict[str, Any] = sts_client.assume_role(RoleArn=role, RoleSessionName="GetCopyStatusLambda") credentials = assumed_role.get("Credentials") @@ -60,8 +58,6 @@ def lambda_handler(event: Dict[str, Any], context: Any) -> str: state = images[0].get("State") if state and state == "available": - MigrationStateHandler().update_state( - state="IMAGE_COPIED", machine_name=instance_name - ) + MigrationStateHandler().update_state(state="IMAGE_COPIED", machine_name=instance_name) return state diff --git a/step/lambdas/get_image_status.py b/step/lambdas/get_image_status.py index c87ec9bd0..b22627f28 100644 --- a/step/lambdas/get_image_status.py +++ b/step/lambdas/get_image_status.py @@ -33,8 +33,6 @@ def lambda_handler(event: Dict[str, Any], context: Any) -> str: state = ami_state["Images"][0]["State"] if state == "available": - MigrationStateHandler().update_state( - state="IMAGE_CREATED", machine_name=instance_name - ) + MigrationStateHandler().update_state(state="IMAGE_CREATED", machine_name=instance_name) return state diff --git a/step/lambdas/get_instance_status.py b/step/lambdas/get_instance_status.py index ae5ed5bda..24125ae72 100644 --- a/step/lambdas/get_instance_status.py +++ b/step/lambdas/get_instance_status.py @@ -49,8 +49,6 @@ def lambda_handler(event: Dict[str, Any], context: Any) -> str: state = "system_failed" if state == "running": - MigrationStateHandler().update_state( - state="INSTANCE_READY", machine_name=instance_name - ) + MigrationStateHandler().update_state(state="INSTANCE_READY", machine_name=instance_name) return state diff --git a/step/lambdas/image_cleanup.py b/step/lambdas/image_cleanup.py index f9449ac9d..015d873e8 100644 --- a/step/lambdas/image_cleanup.py +++ b/step/lambdas/image_cleanup.py @@ -46,8 +46,6 @@ def lambda_handler(event: Dict[str, Any], context: Any) -> bool: print(f"Failed. 
AMI may not exist.\n{str(e)}") return False - MigrationStateHandler().update_state( - state="IMAGE_READY", machine_name=instance_name - ) + MigrationStateHandler().update_state(state="IMAGE_READY", machine_name=instance_name) return True diff --git a/step/lambdas/migrationstate.py b/step/lambdas/migrationstate.py index 629ffbfe5..7f335b5d0 100644 --- a/step/lambdas/migrationstate.py +++ b/step/lambdas/migrationstate.py @@ -20,11 +20,7 @@ def update_state(self, state: str, machine_name: str, **kwargs) -> bool: print("Event queue: " + os.environ.get("event_queue")) queue_url = os.environ.get("event_queue") state_obj = MigrationState(state, machine_name, **kwargs) - print( - sqs.send_message( - QueueUrl=queue_url, MessageBody=json.dumps(state_obj.state_dict) - ) - ) + print(sqs.send_message(QueueUrl=queue_url, MessageBody=json.dumps(state_obj.state_dict))) class MigrationException(Exception): diff --git a/step/lambdas/share_image.py b/step/lambdas/share_image.py index 29cb29c33..a15c639a3 100644 --- a/step/lambdas/share_image.py +++ b/step/lambdas/share_image.py @@ -59,7 +59,5 @@ def lambda_handler(event: Dict[str, Any], context: Any) -> bool: print(e) return False - MigrationStateHandler().update_state( - state="IMAGE_SHARED", machine_name=instance_name - ) + MigrationStateHandler().update_state(state="IMAGE_SHARED", machine_name=instance_name) return True diff --git a/step/lambdas/split_image.py b/step/lambdas/split_image.py index 7604f5951..76aba846f 100644 --- a/step/lambdas/split_image.py +++ b/step/lambdas/split_image.py @@ -32,9 +32,7 @@ def lambda_handler(event: Dict[str, Any], context: Any) -> str: sts_client = boto3.client("sts") print(f"Assuming role: {role}") - assumed_role: Dict[str, Any] = sts_client.assume_role( - RoleArn=role, RoleSessionName="SplitImageLambda" - ) + assumed_role: Dict[str, Any] = sts_client.assume_role(RoleArn=role, RoleSessionName="SplitImageLambda") credentials = assumed_role.get("Credentials") @@ -80,8 +78,7 @@ def lambda_handler(event: Dict[str, Any], context: Any) -> str: for drive in drives: print(drives[drive]) ec2_res.create_tags( - Resources=[root_ami], - Tags=[{"Key": f"Drive-{drive}", "Value": json.dumps(drives[drive])}], + Resources=[root_ami], Tags=[{"Key": f"Drive-{drive}", "Value": json.dumps(drives[drive])}], ) # clean up original image @@ -90,8 +87,6 @@ def lambda_handler(event: Dict[str, Any], context: Any) -> str: print(e) return "" - MigrationStateHandler().update_state( - state="IMAGE_SPLIT", machine_name=instance_name - ) + MigrationStateHandler().update_state(state="IMAGE_SPLIT", machine_name=instance_name) return root_ami
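
Taken together, the changes above move black's configuration off the command line and into pyproject.toml: the Makefile lint target and the GitHub workflow now invoke black with no --target-version flag, so the shared [tool.black] section (line-length 120, target-version py38, plus an exclude pattern covering cloudendure_api/ and the usual build directories) is what drives the 120-column reformatting seen throughout this diff. A minimal sketch of the resulting lint flow, using only the commands already present in the Makefile and lint-python.yml changes above:

    # Reformat locally (Makefile `lint` target; depends on the `isort` target):
    poetry run black .

    # Verify formatting in CI (lint-python.yml "Lint with Black" step):
    black --version
    black --check .

Both commands pick up the same [tool.black] settings from pyproject.toml, so local runs and the CI check stay in agreement without repeating flags in each caller.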