diff --git a/tests/commands/test_terraform.py b/tests/commands/test_terraform.py
index 1bb39c3..928113b 100644
--- a/tests/commands/test_terraform.py
+++ b/tests/commands/test_terraform.py
@@ -230,7 +230,7 @@ def test_worker_options(self, tf_13cmd_options):
         # )
 
     def test_no_create_backend_bucket_fails_gcs(self, grootc_no_create_backend_bucket):
-        with pytest.raises(BackendError):
+        with pytest.raises(SystemExit):
             with mock.patch(
                 "tfworker.commands.base.BaseCommand.get_terraform_version",
                 side_effect=lambda x: (13, 3),
diff --git a/tests/util/test_copier.py b/tests/util/test_copier.py
index 4fe5b76..c58ac30 100644
--- a/tests/util/test_copier.py
+++ b/tests/util/test_copier.py
@@ -33,6 +33,7 @@ else:
 
 C_ROOT_PATH = "/tmp/test/"
 
+
 @pytest.fixture(scope="session")
 def register_test_copier():
     @CopyFactory.register("testfixture")
@@ -342,7 +343,9 @@ def test_local_path(self):
 
         # Ensure file not found error is raised on invalid relative path
         with pytest.raises(FileNotFoundError):
-            FileSystemCopier(source="some/invalid/path", root_path=os.getcwd()).local_path
+            FileSystemCopier(
+                source="some/invalid/path", root_path=os.getcwd()
+            ).local_path
 
         # Ensure file not found error is raised on invalid absolute path
         with pytest.raises(FileNotFoundError):
@@ -357,10 +360,7 @@ def test_type_match(self, request):
         # this should return true because the source is a valid directory
         assert FileSystemCopier.type_match(source) is True
         # this should return false because the full path to source does not exist inside of root_path
-        assert (
-            FileSystemCopier.type_match("/some/invalid/path")
-            is False
-        )
+        assert FileSystemCopier.type_match("/some/invalid/path") is False
         # this should return true because the full path to source exists inside of root_path
         assert (
             FileSystemCopier.type_match(
diff --git a/tfworker/backends/__init__.py b/tfworker/backends/__init__.py
index 11ca1a9..bcb5e4c 100644
--- a/tfworker/backends/__init__.py
+++ b/tfworker/backends/__init__.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from .base import Backends
+from .base import BackendError, Backends, BaseBackend  # noqa
 from .gcs import GCSBackend  # noqa
 from .s3 import S3Backend  # noqa
diff --git a/tfworker/backends/base.py b/tfworker/backends/base.py
index e08e79d..6fa40cc 100644
--- a/tfworker/backends/base.py
+++ b/tfworker/backends/base.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from abc import ABCMeta, abstractmethod
+from abc import ABCMeta, abstractmethod, abstractproperty
 
 from tfworker import JSONType
 
@@ -22,6 +22,7 @@ class BackendError(Exception):
 
 
 class BaseBackend(metaclass=ABCMeta):
+    plan_storage = False
     tag = "base"
 
     @abstractmethod
@@ -40,6 +41,10 @@ def clean(self, deployment: str, limit: tuple) -> str:
     def remotes(self) -> list:
         pass
 
+    @property
+    def handlers(self) -> dict:
+        return {}
+
 
 class Backends:
     s3 = "s3"
diff --git a/tfworker/backends/gcs.py b/tfworker/backends/gcs.py
index b2e47c9..a55be1f 100644
--- a/tfworker/backends/gcs.py
+++ b/tfworker/backends/gcs.py
@@ -16,6 +16,7 @@
 import click
 from google.api_core import page_iterator
+from google.auth.exceptions import DefaultCredentialsError
 from google.cloud import storage
 from google.cloud.exceptions import Conflict, NotFound
 
@@ -39,14 +40,17 @@ def __init__(self, authenticators, definitions, deployment=None):
         if not self._gcs_prefix.endswith("/"):
             self._gcs_prefix = f"{self._gcs_prefix}/"
 
-        if self._authenticator.creds_path:
-            self._storage_client = storage.Client.from_service_account_json(
-                self._authenticator.creds_path
-            )
-        else:
-            self._storage_client = storage.Client(
-                project=self._authenticator.project
-            )
+        try:
+            if self._authenticator.creds_path:
+                self._storage_client = storage.Client.from_service_account_json(
+                    self._authenticator.creds_path
+                )
+            else:
+                self._storage_client = storage.Client(
+                    project=self._authenticator.project
+                )
+        except DefaultCredentialsError as e:
+            raise BackendError(f"Unable to authenticate to GCS, error: {str(e)}")
 
         try:
             self._storage_client.get_bucket(self._gcs_bucket)
@@ -96,7 +100,7 @@ def _clean_prefix(self, prefix: str) -> None:
                 raise BackendError(f"state file at: {b.name} is not empty")
 
     def remotes(self) -> list:
-        """ this is unimplemented here """
+        """this is unimplemented here"""
         raise NotImplementedError
 
     def _get_state_list(self) -> list:
diff --git a/tfworker/backends/s3.py b/tfworker/backends/s3.py
index f6127e2..ae182dd 100644
--- a/tfworker/backends/s3.py
+++ b/tfworker/backends/s3.py
@@ -16,22 +16,28 @@
 import os
 import sys
 from contextlib import closing
+from pathlib import Path
+from uuid import uuid4
 
 import boto3
 import botocore
 import click
 
+from ..handlers import BaseHandler, HandlerError
 from .base import BackendError, BaseBackend, validate_backend_empty
 
 
 class S3Backend(BaseBackend):
     tag = "s3"
     auth_tag = "aws"
+    plan_storage = False
 
     def __init__(self, authenticators, definitions, deployment=None):
         self._authenticator = authenticators[self.auth_tag]
         self._definitions = definitions
         self._deployment = "undefined"
+        self._handlers = None
+
         if deployment:
             self._deployment = deployment
 
@@ -112,7 +118,7 @@ def __init__(self, authenticators, definitions, deployment=None):
                "Backend bucket not found and --no-create-backend-bucket specified."
            )
 
-        # Generate a list of all files in the bucket, at the desired prefix for the deployment
+        # Generate a list of all files in the bucket, at the desired prefix for the deployment, used for "--all-remote-states" option and clean
         s3_paginator = self._s3_client.get_paginator("list_objects_v2").paginate(
             Bucket=self._authenticator.bucket,
             Prefix=self._authenticator.prefix,
@@ -124,9 +130,22 @@ def __init__(self, authenticators, definitions, deployment=None):
             for key in page["Contents"]:
                 # just append the last part of the prefix to the list
                 self._bucket_files.add(key["Key"].split("/")[-2])
+        try:
+            self._handlers = S3Handler(self._authenticator)
+            self.plan_storage = True
+        except HandlerError as e:
+            click.secho(f"Error initializing S3Handler: {e}")
+            raise SystemExit(1)
+
+    @property
+    def handlers(self) -> dict:
+        """
+        handlers returns a dictionary of handlers for the backend, ensure a singleton
+        """
+        return {self.tag: self._handlers}
 
     def remotes(self) -> list:
-        """ return a list of the remote bucket keys """
+        """return a list of the remote bucket keys"""
         return list(self._bucket_files)
 
     def _check_table_exists(self, name: str) -> bool:
@@ -302,3 +321,171 @@ def filter_keys(paginator, bucket_name, prefix="/", delimiter="/", start_after="
         yield content["Key"]
     except TypeError:
         pass
+
+
+class S3Handler(BaseHandler):
+    """The S3Handler class is a handler for the s3 backend"""
+
+    actions = ["plan", "apply"]
+    required_vars = []
+    _is_ready = False
+
+    def __init__(self, authenticator):
+        try:
+            self.execution_functions = {
+                "plan": {
+                    "check": self._check_plan,
+                    "post": self._post_plan,
+                },
+                "apply": {
+                    "pre": self._pre_apply,
+                },
+            }
+
+            self._authenticator = authenticator
+            self._s3_client = self._authenticator.backend_session.client("s3")
+
+        except Exception as e:
+            raise HandlerError(f"Error initializing S3Handler: {e}")
+
+    def is_ready(self):
+        if not self._is_ready:
+            filename = str(uuid4().hex[:6].upper())
+            if self._s3_client.list_objects(
+                Bucket=self._authenticator.bucket,
+                Prefix=f"{self._authenticator.prefix}/{filename}",
+            ).get("Contents"):
+                raise HandlerError(
+                    f"Error initializing S3Handler, remote file already exists: {filename}"
+                )
+            try:
+                self._s3_client.upload_file(
+                    "/dev/null",
+                    self._authenticator.bucket,
+                    f"{self._authenticator.prefix}/{filename}",
+                )
+            except boto3.exceptions.S3UploadFailedError as e:
+                raise HandlerError(
+                    f"Error initializing S3Handler, could not create file: {e}"
+                )
+            try:
+                self._s3_client.delete_object(
+                    Bucket=self._authenticator.bucket,
+                    Key=f"{self._authenticator.prefix}/{filename}",
+                )
+            except boto3.exceptions.S3UploadFailedError as e:
+                raise HandlerError(
+                    f"Error initializing S3Handler, could not delete file: {e}"
+                )
+            self._is_ready = True
+        return self._is_ready
+
+    def execute(self, action, stage, **kwargs):
+        # save a copy of the planfile to the backend state bucket
+        if action in self.execution_functions.keys():
+            if stage in self.execution_functions[action].keys():
+                self.execution_functions[action][stage](**kwargs)
+        return None
+
+    def _check_plan(self, planfile: Path, definition: str, **kwargs):
+        """_check_plan runs while the plan is being checked, it should fetch a file from the backend and store it in the local location"""
+        # ensure planfile does not exist or is zero bytes if it does
+        remotefile = f"{self._authenticator.prefix}/{definition}/{planfile.name}"
+        if planfile.exists():
+            if planfile.stat().st_size == 0:
+                planfile.unlink()
+            else:
+                raise HandlerError(f"planfile already exists: {planfile}")
+        if self._s3_get_plan(planfile, remotefile):
+            if not planfile.exists():
+                raise HandlerError(f"planfile not found after download: {planfile}")
+            click.secho(
+                f"remote planfile downloaded: s3://{self._authenticator.bucket}/{remotefile} -> {planfile}",
+                fg="yellow",
+            )
+
+    def _post_plan(
+        self, planfile: Path, definition: str, changes: bool = False, **kwargs
+    ):
+        """_post_plan runs after the plan is complete, it should upload the planfile to the backend"""
+        logfile = planfile.with_suffix(".log")
+        remotefile = f"{self._authenticator.prefix}/{definition}/{planfile.name}"
+        remotelog = remotefile.replace(".tfplan", ".log")
+        if "text" in kwargs.keys():
+            with open(logfile, "w") as f:
+                f.write(kwargs["text"])
+        if planfile.exists() and changes:
+            if self._s3_put_plan(planfile, remotefile):
+                click.secho(
+                    f"remote planfile uploaded: {planfile} -> s3://{self._authenticator.bucket}/{remotefile}",
+                    fg="yellow",
+                )
+            if self._s3_put_plan(logfile, remotelog):
+                click.secho(
+                    f"remote logfile uploaded: {logfile} -> s3://{self._authenticator.bucket}/{remotelog}",
+                    fg="yellow",
+                )
+        return None
+
+    def _pre_apply(self, planfile: Path, definition: str, **kwargs):
+        """_pre_apply runs before the apply is started, it should remove the planfile from the backend"""
+        logfile = planfile.with_suffix(".log")
+        remotefile = f"{self._authenticator.prefix}/{definition}/{planfile.name}"
+        remotelog = remotefile.replace(".tfplan", ".log")
+        if self._s3_delete_plan(remotefile, planfile):
+            click.secho(
+                f"remote planfile removed: s3://{self._authenticator.bucket}/{remotefile}",
+                fg="yellow",
+            )
+        if self._s3_delete_plan(remotelog, logfile):
+            click.secho(
+                f"remote logfile removed: s3://{self._authenticator.bucket}/{remotelog}",
+                fg="yellow",
+            )
+        return None
+
+    def _s3_get_plan(self, planfile: Path, remotefile: str) -> bool:
+        """_s3_get_plan downloads the file from s3"""
+        # fetch the planfile from the backend
+        downloaded = False
+        try:
+            self._s3_client.download_file(
+                self._authenticator.bucket, remotefile, planfile
+            )
+            # make sure the local file exists, and is greater than 0 bytes
+            downloaded = True
+        except botocore.exceptions.ClientError as e:
+            if e.response["Error"]["Code"] == "404":
+                click.secho(f"remote plan {remotefile} not found", fg="yellow")
+                pass
+            else:
+                raise HandlerError(f"Error downloading planfile: {e}")
+        return downloaded
+
+    def _s3_put_plan(self, planfile: Path, remotefile: str) -> bool:
+        """_s3_put_plan uploads the file to s3"""
+        uploaded = False
+        # don't upload empty plans
+        if planfile.stat().st_size == 0:
+            return uploaded
+        try:
+            self._s3_client.upload_file(
+                str(planfile), self._authenticator.bucket, remotefile
+            )
+            uploaded = True
+        except botocore.exceptions.ClientError as e:
+            raise HandlerError(f"Error uploading planfile: {e}")
+        return uploaded
+
+    def _s3_delete_plan(self, remotefile: str, planfile: str) -> bool:
+        """_s3_delete_plan removes a remote plan file"""
+        deleted = False
+        try:
+            self._s3_client.delete_object(
+                Bucket=self._authenticator.bucket, Key=remotefile
+            )
+            deleted = True
+        except botocore.exceptions.ClientError as e:
+            raise HandlerError(f"Error deleting planfile: {e}")
+        return deleted
diff --git a/tfworker/cli.py b/tfworker/cli.py
index ea08d0c..23baca5 100644
--- a/tfworker/cli.py
+++ b/tfworker/cli.py
@@ -209,6 +209,12 @@ def validate_working_dir(fpath):
     envvar="WORKER_CLEAN",
     help="clean up the temporary directory created by the worker after execution",
 )
+@click.option(
"--backend-plans/--no-backend-plans", + default=False, + envvar="WORKER_BACKEND_PLANS", + help="store plans in the backend", +) @click.pass_context def cli(context, **kwargs): """CLI for the worker utility.""" @@ -262,6 +268,7 @@ def version(): "--plan/--no-plan", "tf_plan", envvar="WORKER_PLAN", + type=bool, default=True, help="toggle running a plan, plan will still be skipped if using a saved plan file with apply", ) @@ -339,9 +346,7 @@ def terraform(rootc, *args, **kwargs): try: tfc = TerraformCommand(rootc, *args, **kwargs) except FileNotFoundError as e: - click.secho( - f"terraform binary not found: {e.filename}", fg="red", err=True - ) + click.secho(f"terraform binary not found: {e.filename}", fg="red", err=True) raise SystemExit(1) click.secho(f"building deployment {kwargs.get('deployment')}", fg="green") diff --git a/tfworker/commands/base.py b/tfworker/commands/base.py index ccf9316..0117ea2 100644 --- a/tfworker/commands/base.py +++ b/tfworker/commands/base.py @@ -17,7 +17,7 @@ import click from tfworker.authenticators import AuthenticatorsCollection -from tfworker.backends import select_backend +from tfworker.backends import BackendError, select_backend from tfworker.definitions import DefinitionsCollection from tfworker.handlers import HandlersCollection from tfworker.handlers.exceptions import HandlerError, UnknownHandler @@ -106,12 +106,26 @@ def __init__(self, rootc, deployment="undefined", limit=tuple(), **kwargs): self._plugins = PluginsCollection( plugins_odict, self._temp_dir, self._provider_cache, self._tf_version_major ) - self._backend = select_backend( - self._resolve_arg("backend"), - deployment, - self._authenticators, - self._definitions, - ) + try: + self._backend = select_backend( + self._resolve_arg("backend"), + deployment, + self._authenticators, + self._definitions, + ) + except BackendError as e: + click.secho(e, fg="red") + raise SystemExit(1) + + # if backend_plans is requested, check if backend supports it + self._backend_plans = self._resolve_arg("backend_plans") + if self._backend_plans: + if not self._backend.plan_storage: + click.secho( + f"backend {self._backend.tag} does not support backend_plans", + fg="red", + ) + raise SystemExit(1) # initialize handlers collection try: @@ -120,6 +134,10 @@ def __init__(self, rootc, deployment="undefined", limit=tuple(), **kwargs): click.secho(e, fg="red") raise SystemExit(1) + # allow a backend to implement handlers as well since they already control the provider session + if self._backend.handlers: + self._handlers.update(self._backend.handlers) + @property def authenticators(self): return self._authenticators @@ -148,18 +166,18 @@ def temp_dir(self): def repository_path(self): return self._repository_path - def _execute_handlers(self, action, **kwargs): + def _execute_handlers(self, action, stage, **kwargs): """Execute all ready handlers for supported actions""" for h in self._handlers: if action in h.actions and h.is_ready(): - h.execute(action, **kwargs) + h.execute(action, stage, **kwargs) def _resolve_arg(self, name): """Resolve argument in order of precedence: 1) CLI argument 2) Config file """ - if name in self._args_dict and self._args_dict[name]: + if name in self._args_dict and self._args_dict[name] is not None: return self._args_dict[name] if name in self._rootc.worker_options_odict: return self._rootc.worker_options_odict[name] diff --git a/tfworker/commands/terraform.py b/tfworker/commands/terraform.py index 5997a2e..99abc45 100644 --- a/tfworker/commands/terraform.py +++ 
@@ -22,6 +22,7 @@
 import click
 
 from tfworker.commands.base import BaseCommand
+from tfworker.definitions import Definition
 from tfworker.handlers.exceptions import HandlerError
 from tfworker.util.system import pipe_exec, strip_ansi
 
@@ -41,15 +42,20 @@ class TerraformError(Exception):
 
 class TerraformCommand(BaseCommand):
     def __init__(self, rootc, **kwargs):
         super(TerraformCommand, self).__init__(rootc, **kwargs)
-        self._destroy = self._resolve_arg("destroy")
         self._tf_apply = self._resolve_arg("tf_apply")
         self._tf_plan = self._resolve_arg("tf_plan")
         self._plan_file_path = self._resolve_arg("plan_file_path")
+
         if self._tf_apply and self._destroy:
             click.secho("can not apply and destroy at the same time", fg="red")
             raise SystemExit(1)
 
+        if self._backend_plans and not self._plan_file_path:
+            # create a plan file path in the tmp dir
+            self._plan_file_path = f"{self._temp_dir}/plans"
+            pathlib.Path(self._plan_file_path).mkdir(parents=True, exist_ok=True)
+
         self._b64_encode = self._resolve_arg("b64_encode")
         self._deployment = kwargs["deployment"]
         self._force = self._resolve_arg("force")
@@ -63,6 +69,7 @@ def __init__(self, rootc, **kwargs):
 
     @property
     def plan_for(self):
+        """plan_for will either be apply or destroy, indicating what action is being planned for"""
         return self._plan_for
 
     @property
@@ -101,10 +108,8 @@ def prep_modules(self):
             ignore=shutil.ignore_patterns("test", ".terraform", "terraform.tfstate*"),
         )
 
-    def exec(self):
-        """exec handles running the terraform chain"""
-        skip_plan = False
-
+    def _prep_and_init(self, def_iter: iter = None) -> None:
+        """_prep_and_init prepares the modules and runs terraform init"""
         try:
             def_iter = self.definitions.limited()
         except ValueError as e:
@@ -112,11 +117,10 @@ def exec(self):
             raise SystemExit(1)
 
         for definition in def_iter:
-            execute = False
-            plan_file = None
             # copy definition files / templates etc.
             click.secho(f"preparing definition: {definition.tag}", fg="green")
             definition.prep(self._backend)
+
             # run terraform init
             try:
                 self._run(definition, "init", debug=self._show_output)
@@ -124,108 +128,237 @@ def exec(self):
                 click.secho("error running terraform init", fg="red")
                 raise SystemExit(1)
 
-            # validate the path and resolve the plan file name
-            if self._plan_file_path:
-                plan_path = pathlib.Path.absolute(pathlib.Path(self._plan_file_path))
-                if not (plan_path.exists() and plan_path.is_dir()):
-                    click.secho(
-                        f'plan path "{plan_path}" is not suitable, it is not an existing directory'
-                    )
-                    raise SystemExit()
-                plan_file = pathlib.Path(
-                    f"{plan_path}/{self._deployment}_{definition.tag}.tfplan"
-                )
-                click.secho(f"using plan file:{plan_file}", fg="yellow")
-
-            # check if a plan file for the given deployment/definition exists, if so
-            # do not plan again
-            if plan_file is not None:
-                # if plan file is set, check if it exists, if it does do not plan again
-                if plan_file.exists():
-                    if self._tf_plan:
-                        click.secho(f"plan file {plan_file} exists, not planning again", fg="red")
-                    execute = True
-                    skip_plan = True
-
-            if skip_plan is False and self._tf_plan:
-                # run terraform plan
+    def _check_plan(self, definition: Definition) -> bool:
+        """determines if a plan is needed"""
+        # when no plan path is specified, it's straightforward
+        if self._plan_file_path is None:
+            if self._tf_plan is False:
+                definition._ready_to_apply = True
+                return False
+            else:
+                definition._ready_to_apply = False
+                return True
+
+        # a lot more to consider when a plan path is specified
+        plan_path = pathlib.Path.absolute(pathlib.Path(self._plan_file_path))
+        plan_file = pathlib.Path(
+            f"{plan_path}/{self._deployment}_{definition.tag}.tfplan"
+        )
+        definition.plan_file = plan_file
+        click.secho(f"using plan file:{plan_file}", fg="yellow")
+
+        if not (plan_path.exists() and plan_path.is_dir()):
+            click.secho(
+                f'plan path "{plan_path}" is not suitable, it is not an existing directory'
+            )
+            raise SystemExit()
+
+        # run all the handlers with action plan and stage check
+        try:
+            self._execute_handlers(
+                action="plan",
+                stage="check",
+                deployment=self._deployment,
+                definition=definition.tag,
+                planfile=plan_file,
+            )
+        except HandlerError as e:
+            click.secho(f"handler error: {e}", fg="red")
+            raise SystemExit(1)
+
+        # if --no-plan is specified, skip planning step regardless of other conditions
+        if self._tf_plan is False:
+            definition._ready_to_apply = True
+            return False
+
+        # planning was requested, check if existing plan is suitable
+        if plan_file.exists():
+            if plan_file.stat().st_size == 0:
                 click.secho(
-                    f"planning definition for {self._plan_for}: {definition.tag}",
+                    f"existing plan file {plan_file} is empty; planning again",
                     fg="green",
                 )
+                definition._ready_to_apply = False
+                return True
+            click.secho(
+                f"existing plan file {plan_file} is suitable for apply; not planning again; remove plan file to allow planning",
+                fg="green",
+            )
+            definition._ready_to_apply = True
+            return False
 
-                try:
-                    self._run(
-                        definition,
-                        "plan",
-                        debug=self._show_output,
-                        plan_action=self._plan_for,
-                        plan_file=str(plan_file),
-                    )
-                except PlanChange:
-                    # on destroy, terraform ALWAYS indicates a plan change
-                    click.secho(
-                        f"plan changes for {self._plan_for} {definition.tag}", fg="red"
-                    )
-                    execute = True
-                    try:
-                        self._execute_handlers(
-                            action="plan",
-                            deployment=self._deployment,
-                            definition=definition.tag,
-                            text=strip_ansi(self._terraform_output["stdout"].decode()),
-                            planfile=plan_file,
-                        )
-                    except HandlerError as e:
-                        click.secho(f"handler error: {e}", fg="red")
-
-                except TerraformError:
-                    click.secho(
-                        f"error planning terraform definition: {definition.tag}!",
-                        fg="red",
-                    )
-                    raise SystemExit(2)
+        # All of the false conditions have been returned, so we need to plan
+        definition._ready_to_apply = False
+        return True
+
+    def _exec_plan(self, definition) -> bool:
+        """_exec_plan executes a terraform plan, returns true if a plan has changes"""
+        changes = False
 
-            if not execute:
-                click.secho(f"no plan changes for {definition.tag}", fg="yellow")
+        # call handlers for pre plan
+        try:
+            self._execute_handlers(
+                action="plan",
+                stage="pre",
+                deployment=self._deployment,
+                definition=definition.tag,
+            )
+        except HandlerError as e:
+            click.secho(f"handler error: {e}", fg="red")
+            raise SystemExit(1)
 
-            if self._force and (self._tf_apply or self._destroy):
-                execute = True
-                click.secho(f"forcing {self._plan_for} due to --force", fg="red")
+        click.secho(
+            f"planning definition for {self._plan_for}: {definition.tag}",
+            fg="green",
+        )
 
-            # there are no plan changes, and no forced execution, so move on
-            if not execute:
-                continue
+        try:
+            self._run(
+                definition,
+                "plan",
+                debug=self._show_output,
+                plan_action=self._plan_for,
+                plan_file=str(definition.plan_file),
+            )
+        except PlanChange:
+            # on destroy, terraform ALWAYS indicates a plan change
+            click.secho(f"plan changes for {self._plan_for} {definition.tag}", fg="red")
+            definition._ready_to_apply = True
+            changes = True
+        except TerraformError:
+            click.secho(
+                f"error planning terraform definition: {definition.tag}!",
+                fg="red",
+            )
+            raise SystemExit(2)
 
-            # no apply/destroy requested, so move on
-            if not self._tf_apply and not self._destroy:
-                continue
+        try:
+            self._execute_handlers(
+                action="plan",
+                stage="post",
+                deployment=self._deployment,
+                definition=definition.tag,
+                text=strip_ansi(self._terraform_output["stdout"].decode()),
+                planfile=definition.plan_file,
+                changes=changes,
+            )
+        except HandlerError as e:
+            click.secho(f"handler error: {e}", fg="red")
 
-            try:
-                self._run(
-                    definition,
-                    self._plan_for,
-                    debug=self._show_output,
-                    plan_file=str(plan_file),
-                )
-            except TerraformError:
+        if not changes:
+            click.secho(f"no plan changes for {definition.tag}", fg="yellow")
+
+        return changes
+
+    def _check_apply_or_destroy(self, changes, definition) -> bool:
+        """_check_apply_or_destroy determines if a terraform execution is needed"""
+        # never apply if --no-apply is used
+        if self._tf_apply is not True:
+            return False
+
+        # if not changes and not force, skip apply
+        if not (changes or definition._ready_to_apply) and not self._force:
+            click.secho("no changes, skipping terraform apply", fg="yellow")
+            return False
+
+        # when a plan file path is in use, the plan file must exist in order to apply
+        if self._plan_file_path is not None:
+            if not definition.plan_file.exists():
                 click.secho(
-                    f"error with terraform {self._plan_for} on definition"
-                    f" {definition.tag}, exiting",
+                    f"plan file {definition.plan_file} does not exist, can't apply",
                     fg="red",
                 )
-                if plan_file is not None:
-                    # plan file yeilded an error and has been consumed
-                    plan_file.unlink()
-                raise SystemExit(2)
-            else:
-                click.secho(
-                    f"terraform {self._plan_for} complete for {definition.tag}",
-                    fg="green",
-                )
-                if plan_file is not None:
-                    # plan file succeeded and has been consumed
-                    plan_file.unlink()
+                return False
+
+        # if --force is specified, always apply
+        if self._force:
+            click.secho(
+                f"--force specified, proceeding with apply for {definition.tag} anyway",
+            )
+            return True
+
+        # All of the false conditions have been returned
+        return True
+
+    def _exec_apply_or_destroy(self, definition) -> None:
+        """_exec_apply_or_destroy executes a terraform apply or destroy"""
+        # call handlers for pre apply
+        click.secho(
+            f"DEBUG: executing {self._plan_for} for {definition.tag}", fg="yellow"
+        )
+        try:
+            self._execute_handlers(
+                action=self._plan_for,
+                stage="pre",
+                deployment=self._deployment,
+                definition=definition.tag,
+                planfile=definition.plan_file,
+            )
+        except HandlerError as e:
+            click.secho(f"handler error: {e}", fg="red")
+            raise SystemExit(1)
+
+        # execute terraform apply or destroy
+        tf_error = False
+        try:
+            self._run(
+                definition,
+                self._plan_for,
+                debug=self._show_output,
+                plan_file=definition.plan_file,
+            )
+        except TerraformError:
+            tf_error = True
+
+        # remove the plan file if it exists
+        if definition.plan_file is not None and definition.plan_file.exists():
+            definition.plan_file.unlink()
+
+        # call handlers for post apply/destroy
+        try:
+            self._execute_handlers(
+                action=self._plan_for,
+                stage="post",
+                deployment=self._deployment,
+                definition=definition.tag,
+                planfile=definition.plan_file,
+                error=tf_error,
+            )
+        except HandlerError as e:
+            click.secho(f"handler error: {e}", fg="red")
+            raise SystemExit(1)
+
+        if tf_error is True:
+            click.secho(
+                f"error executing terraform {self._plan_for} for {definition.tag}",
+                fg="red",
+            )
+            raise SystemExit(2)
+        else:
+            click.secho(
+                f"terraform {self._plan_for} complete for {definition.tag}",
+                fg="green",
+            )
+
+    def exec(self):
+        """exec handles running the terraform chain"""
+        try:
+            def_iter = self.definitions.limited()
+        except ValueError as e:
+            click.secho(f"Error with supplied limit: {e}", fg="red")
+            raise SystemExit(1)
+
+        # prepare the modules and run terraform init
+        self._prep_and_init(def_iter)
+
+        for definition in def_iter:
+            changes = None
+            # exec planning step if we should
+            if self._check_plan(definition):
+                changes = self._exec_plan(definition)
+            # exec apply step if we should
+            if self._check_apply_or_destroy(changes, definition):
+                self._exec_apply_or_destroy(definition)
 
     def _run(
         self, definition, command, debug=False, plan_action="init", plan_file=None
diff --git a/tfworker/definitions.py b/tfworker/definitions.py
index 42709a8..e278520 100644
--- a/tfworker/definitions.py
+++ b/tfworker/definitions.py
@@ -15,7 +15,7 @@
 import collections
 import copy
 import json
-from pathlib import Path, PurePath
+from pathlib import Path, PosixPath, PurePath, WindowsPath
 
 import click
 import hcl2
@@ -38,6 +38,9 @@ class ReservedFileError(Exception):
 
 
 class Definition:
+    _plan_file = None
+    _ready_to_apply = False
+
     def __init__(
         self,
         definition,
@@ -67,6 +70,7 @@ def __init__(
             body.get("template_vars", dict()), global_template_vars
         )
+        self._always_apply = body.get("always_apply", False)
         self._deployment = deployment
         self._repository_path = repository_path
         self._providers = providers
@@ -107,6 +111,16 @@ def provider_names(self):
             result = result.intersection(required_providers)
         return result
 
+    @property
+    def plan_file(self):
+        return self._plan_file
+
+    @plan_file.setter
+    def plan_file(self, value: Path):
+        if type(value) not in [PosixPath, WindowsPath, Path]:
+            raise TypeError("plan_file must be a Path like object")
+        self._plan_file = value
+
     def prep(self, backend):
         """prepare the definitions for running"""
 
@@ -258,7 +272,7 @@ def __init__(
                 tf_version_major,
                 True if limit and definition in limit else False,
                 template_callback=self.render_templates,
-                use_backend_remotes=self._root_args.backend_use_all_remotes
+                use_backend_remotes=self._root_args.backend_use_all_remotes,
             )
 
     def __len__(self):
diff --git a/tfworker/handlers/__init__.py b/tfworker/handlers/__init__.py
index d0c4e07..b83a70b 100644
--- a/tfworker/handlers/__init__.py
+++ b/tfworker/handlers/__init__.py
@@ -1,5 +1,6 @@
 import collections
 
+from .base import BaseHandler
 from .bitbucket import BitbucketHandler
 from .exceptions import HandlerError, UnknownHandler
 
@@ -34,6 +35,18 @@ def __getitem__(self, value):
     def __iter__(self):
         return iter(self._handlers.values())
 
+    def __setitem__(self, key, value):
+        self._handlers[key] = value
+
+    def update(self, handlers_config):
+        """
+        update is used to update the handlers collection with new handlers
+        """
+        for k in handlers_config:
+            if k in self._handlers.keys():
+                raise TypeError(f"Duplicate handler: {k}")
+            self._handlers[k] = handlers_config[k]
+
     def get(self, value):
         try:
             return self[value]
diff --git a/tfworker/handlers/base.py b/tfworker/handlers/base.py
index 6a587cf..0b96928 100644
--- a/tfworker/handlers/base.py
+++ b/tfworker/handlers/base.py
@@ -9,10 +9,17 @@ class BaseHandler(metaclass=ABCMeta):
 
     @abstractmethod
     def is_ready(self):  # pragma: no cover
+        """is_ready is called to determine if a handler is ready to be executed"""
        return True
 
     @abstractmethod
-    def execute(self, action, **kwargs):  # pragma: no cover
+    def execute(self, action: str, stage: str, **kwargs) -> None:  # pragma: no cover
+        """
+        execute is called when a handler should trigger; it accepts the following parameters:
+        action: the action that triggered the handler (one of plan, clean, apply, destroy)
+        stage: the stage of the action (one of pre, post)
+        kwargs: any additional arguments that may be required
+        """
         pass
diff --git a/tfworker/handlers/bitbucket.py b/tfworker/handlers/bitbucket.py
index 5bb7922..3330040 100644
--- a/tfworker/handlers/bitbucket.py
+++ b/tfworker/handlers/bitbucket.py
@@ -71,7 +71,7 @@ def is_ready(self):
         """
         return self._ready
 
-    def execute(self, action, **kwargs):
+    def execute(self, action, stage, **kwargs):
         """
         execute is a generic method that will execute the specified action with the provided arguments.
         """
diff --git a/tfworker/plugins.py b/tfworker/plugins.py
index a5e9f86..0ea234b 100644
--- a/tfworker/plugins.py
+++ b/tfworker/plugins.py
@@ -21,10 +21,12 @@
 import zipfile
 
 import click
-from tenacity import retry, stop_after_attempt, wait_chain, wait_fixed, RetryError, retry_if_not_exception_message
+from tenacity import (RetryError, retry, retry_if_not_exception_message,
+                      stop_after_attempt, wait_chain, wait_fixed)
 
 from tfworker.commands.root import get_platform
 
+
 class PluginSourceParseException(Exception):
     pass
 
@@ -101,6 +103,7 @@ def download(self):
             click.secho(str(e), fg="red")
             click.Abort()
 
+
 class PluginSource:
     """
     Utility object for divining the local module path details from a provider
diff --git a/tfworker/providers/base.py b/tfworker/providers/base.py
index 7af66c7..d02797c 100644
--- a/tfworker/providers/base.py
+++ b/tfworker/providers/base.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+
 class BaseProvider:
     tag = None
 
@@ -79,7 +80,7 @@ def _hclify(self, s, depth=4):
         _hcify is a recursive function that takes a string, list or dict and
         turns the results into an HCL compliant string.
""" - space = ' ' + space = " " result = [] if isinstance(s, str): tmps = s.replace('"', "").replace("'", "") @@ -92,12 +93,14 @@ def _hclify(self, s, depth=4): # check of the value is required to determine how to handle the key for k in s.keys(): if isinstance(s[k], str): - result.append(f"{space * depth}{k} = \"{s[k]}\"") + result.append(f'{space * depth}{k} = "{s[k]}"') elif isinstance(s[k], list): result.append(f"{space * depth}{k} = [{s[k]}]") elif isinstance(s[k], dict): # decrease depth by 4 to account for extra depth added by hclyifying the key - result.append(f"{space * (depth-4)}{self._hclify(k, depth=depth)} = {{") + result.append( + f"{space * (depth-4)}{self._hclify(k, depth=depth)} = {{" + ) result.append(self._hclify(s[k], depth=depth + 2)) result.append(f"{space * depth}}}") else: @@ -107,6 +110,7 @@ def _hclify(self, s, depth=4): return "\n".join(result) + class UnknownProvider(Exception): def __init__(self, provider): super().__init__(f"{provider} is not a known value.") diff --git a/tfworker/util/copier.py b/tfworker/util/copier.py index a74e327..708e1c8 100644 --- a/tfworker/util/copier.py +++ b/tfworker/util/copier.py @@ -169,7 +169,9 @@ def copy(self, **kwargs) -> None: if exitcode != 0: self.clean_temp() - raise RuntimeError(f"unable to clone {self._source}, {stderr.decode('utf-8')}") + raise RuntimeError( + f"unable to clone {self._source}, {stderr.decode('utf-8')}" + ) try: self.check_conflicts(temp_path)