From d016f3ea79fb5175246da749358df81d40537e4e Mon Sep 17 00:00:00 2001 From: ricolin Date: Fri, 1 Nov 2024 16:16:50 +0800 Subject: [PATCH 1/8] fix pep8 --- setup.cfg | 4 +++- staffeln/cmd/dbmanage.py | 6 ++++-- staffeln/conductor/backup.py | 41 +++++++++++++----------------------- staffeln/conductor/result.py | 8 +++---- 4 files changed, 25 insertions(+), 34 deletions(-) diff --git a/setup.cfg b/setup.cfg index 5466daa..53df199 100755 --- a/setup.cfg +++ b/setup.cfg @@ -17,6 +17,8 @@ classifier = Programming Language :: Python :: 3 Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 + Programming Language :: Python :: 3.9 + Programming Language :: Python :: 3.10 Programming Language :: Python :: 3 :: Only Programming Language :: Python :: Implementation :: CPython @@ -35,4 +37,4 @@ console_scripts = wsgi_scripts = staffeln-api-wsgi = staffeln.api:app staffeln.database.migration_backend = - sqlalchemy = staffeln.db.sqlalchemy.migration \ No newline at end of file + sqlalchemy = staffeln.db.sqlalchemy.migration diff --git a/staffeln/cmd/dbmanage.py b/staffeln/cmd/dbmanage.py index d4706cf..5801a36 100644 --- a/staffeln/cmd/dbmanage.py +++ b/staffeln/cmd/dbmanage.py @@ -24,10 +24,12 @@ def do_upgrade(): def add_command_parsers(subparsers): - parser = subparsers.add_parser("create_schema", help="Create the database schema.") + parser = subparsers.add_parser( + "create_schema", help="Create the database schema.") parser.set_defaults(func=DBCommand.create_schema) - parser = subparsers.add_parser("upgrade", help="Upgrade the database schema.") + parser = subparsers.add_parser( + "upgrade", help="Upgrade the database schema.") parser.add_argument("revision", nargs="?") parser.set_defaults(func=DBCommand.do_upgrade) diff --git a/staffeln/conductor/backup.py b/staffeln/conductor/backup.py index 05f5cfd..8375bb5 100755 --- a/staffeln/conductor/backup.py +++ b/staffeln/conductor/backup.py @@ -252,10 +252,8 @@ def soft_remove_backup_task(self, backup_object): backup = self.openstacksdk.get_backup(backup_object.backup_id) if backup is None: LOG.info( - _( - f"Backup {backup_object.backup_id} is removed from " - "Openstack or cinder-backup is not existing in the cloud." - ) + f"Backup {backup_object.backup_id} is removed from " + "Openstack or cinder-backup is not existing in the cloud." ) return backup_object.delete_backup() if backup["status"] in ("available"): @@ -269,15 +267,13 @@ def soft_remove_backup_task(self, backup_object): # backup_object.delete_backup() else: # "deleting", "restoring" LOG.info( - _( - "Rotation for the backup %s is skipped in this cycle " - "because it is in %s status" - ) - % (backup_object.backup_id, backup["status"]) + f"Rotation for the backup {backup_object.backup_id} " + "is skipped in this cycle " + f"because it is in {backup['status']} status" ) except OpenstackSDKException as e: - LOG.warn(_(f"Backup {backup_object.backup_id} deletion failed. {str(e)}")) + LOG.warn(f"Backup {backup_object.backup_id} deletion failed. {str(e)}") # We don't delete backup object if any exception occured # backup_object.delete_backup() return False @@ -288,12 +284,10 @@ def hard_remove_volume_backup(self, backup_object, skip_inc_err=False): project_id = backup_object.project_id if project_id not in self.project_list: LOG.warn( - _( - f"Project {project_id} for backup " - f"{backup_object.backup_id} is not existing in " - "Openstack. Please check your access right to this project. " - "Skip this backup from remove now and will retry later." 
- ) + f"Project {project_id} for backup " + f"{backup_object.backup_id} is not existing in " + "Openstack. Please check your access right to this project. " + "Skip this backup from remove now and will retry later." ) # Don't remove backup object, keep it and retry on next periodic task # backup_object.delete_backup() @@ -305,12 +299,9 @@ def hard_remove_volume_backup(self, backup_object, skip_inc_err=False): ) if backup is None: LOG.info( - _( - "Backup %s is removed from Openstack " - "or cinder-backup is not existing in the cloud. " - "Start removing backup object from Staffeln." - % backup_object.backup_id - ) + f"Backup {backup_object.backup_id} is removed from Openstack " + "or cinder-backup is not existing in the cloud. " + "Start removing backup object from Staffeln." ) return backup_object.delete_backup() @@ -322,10 +313,8 @@ def hard_remove_volume_backup(self, backup_object, skip_inc_err=False): LOG.debug(str(e)) else: LOG.info( - _( - f"Backup {backup_object.backup_id} deletion failed. " - "Skip this backup from remove now and will retry later." - ) + f"Backup {backup_object.backup_id} deletion failed. " + "Skip this backup from remove now and will retry later." ) LOG.debug(f"deletion failed {str(e)}") diff --git a/staffeln/conductor/result.py b/staffeln/conductor/result.py index 34d9660..be07aff 100644 --- a/staffeln/conductor/result.py +++ b/staffeln/conductor/result.py @@ -68,14 +68,12 @@ def send_result_email(self, project_id, subject=None, project_name=None): "smtp_server_port": CONF.notification.smtp_server_port, } email.send(smtp_profile) - LOG.info(_(f"Backup result email sent to {receiver}")) + LOG.info(f"Backup result email sent to {receiver}") return True except Exception as e: LOG.warn( - _( - f"Backup result email send to {receiver} failed. " - f"Please check email configuration. {str(e)}" - ) + f"Backup result email send to {receiver} failed. " + f"Please check email configuration. 
{str(e)}" ) raise From e884f12c7a172641640d6f9d5cfb13374ccd9a3a Mon Sep 17 00:00:00 2001 From: ricolin Date: Fri, 1 Nov 2024 16:28:13 +0800 Subject: [PATCH 2/8] Add pre-commit --- .gitignore | 2 +- .pre-commit-config.yaml | 11 + doc/source/conf.py | 5 +- hack/stack.sh | 2 +- releasenotes/source/conf.py | 7 +- requirements.txt | 3 +- setup.py | 3 +- staffeln/__init__.py | 2 +- staffeln/api/app.py | 13 +- staffeln/api/middleware/parsable_error.py | 7 +- staffeln/api/wsgi.py | 2 + staffeln/cmd/api.py | 6 +- staffeln/cmd/conductor.py | 15 +- staffeln/cmd/dbmanage.py | 16 +- staffeln/common/auth.py | 2 + staffeln/common/config.py | 2 + staffeln/common/constants.py | 2 + staffeln/common/context.py | 10 +- staffeln/common/email.py | 16 +- staffeln/common/lock.py | 15 +- staffeln/common/openstack.py | 48 +- staffeln/common/service.py | 6 +- staffeln/common/short_id.py | 3 + staffeln/common/time.py | 17 +- staffeln/conductor/backup.py | 183 ++++--- staffeln/conductor/manager.py | 95 ++-- staffeln/conductor/result.py | 68 ++- staffeln/conf/__init__.py | 9 +- staffeln/conf/api.py | 7 +- staffeln/conf/conductor.py | 30 +- staffeln/conf/database.py | 7 +- staffeln/conf/notify.py | 10 +- staffeln/conf/paths.py | 7 +- staffeln/db/api.py | 7 +- staffeln/db/base.py | 2 + staffeln/db/migration.py | 6 +- staffeln/db/sqlalchemy/alembic/env.py | 6 +- .../versions/041d9a0f1159_backup_add_names.py | 10 +- ...d_add_reason_column_to_queue_data_table.py | 4 +- .../5b2e78435231_add_report_timestamp.py | 10 +- .../ebdbed01e9a7_added_incremental_field.py | 12 +- staffeln/db/sqlalchemy/api.py | 32 +- staffeln/db/sqlalchemy/migration.py | 5 +- staffeln/db/sqlalchemy/models.py | 9 +- staffeln/exception.py | 5 +- staffeln/i18n.py | 2 + staffeln/objects/__init__.py | 2 + staffeln/objects/base.py | 16 +- staffeln/objects/fields.py | 3 + staffeln/objects/queue.py | 11 +- staffeln/objects/report.py | 11 +- staffeln/objects/volume.py | 17 +- staffeln/tests/base.py | 3 +- staffeln/tests/common/test_openstacksdk.py | 503 ++++++++++++++++++ staffeln/tests/test_staffeln.py | 3 +- staffeln/version.py | 2 + tox.ini | 2 +- 57 files changed, 1066 insertions(+), 248 deletions(-) create mode 100644 .pre-commit-config.yaml create mode 100644 staffeln/tests/common/test_openstacksdk.py diff --git a/.gitignore b/.gitignore index 32e35b0..1ae05e4 100755 --- a/.gitignore +++ b/.gitignore @@ -68,4 +68,4 @@ releasenotes/build *.log # envvar openrc file -*openrc.sh \ No newline at end of file +*openrc.sh diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..1dbe818 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,11 @@ +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.3.0 + hooks: + - id: end-of-file-fixer + - id: trailing-whitespace + - repo: https://github.com/pycqa/flake8 + rev: 7.0.0 + hooks: + - id: flake8 + args: [--max-line-length=79] diff --git a/doc/source/conf.py b/doc/source/conf.py index 1c590d6..7a12515 100755 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -11,6 +11,7 @@ # implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import annotations import os import sys @@ -39,8 +40,8 @@ # openstackdocstheme options openstackdocs_repo_name = "openstack/staffeln" openstackdocs_bug_project = ( - "replace with the name of the project on Launchpad or the ID from Storyboard" -) + "replace with the name of the project on " + "Launchpad or the ID from Storyboard") openstackdocs_bug_tag = "" # If true, '()' will be appended to :func: etc. cross-reference text. diff --git a/hack/stack.sh b/hack/stack.sh index 497b29a..4876c88 100755 --- a/hack/stack.sh +++ b/hack/stack.sh @@ -36,7 +36,7 @@ SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5 SWIFT_REPLICAS=1 enable_plugin neutron https://opendev.org/openstack/neutron #swift -enable_service s-proxy s-object s-container s-account +enable_service s-proxy s-object s-container s-account # Cinder enable_service c-bak [[post-config|/etc/neutron/neutron.conf]] diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py index 65d2460..d4b373e 100755 --- a/releasenotes/source/conf.py +++ b/releasenotes/source/conf.py @@ -11,7 +11,6 @@ # implied. # See the License for the specific language governing permissions and # limitations under the License. - # This file is execfile()d with the current directory set to its # containing dir. # @@ -20,20 +19,18 @@ # # All configuration values have a default; values that are commented out # serve to show the default. - # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) - # -- General configuration ------------------------------------------------ - # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' - # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. +from __future__ import annotations + extensions = [ "openstackdocstheme", "reno.sphinxext", diff --git a/requirements.txt b/requirements.txt index 3789929..2372ce2 100755 --- a/requirements.txt +++ b/requirements.txt @@ -20,5 +20,4 @@ parse tooz # Apache-2.0 sherlock>=0.4.1 # MIT kubernetes # Apache-2.0 -# email -# smtplib +pre-commit diff --git a/setup.py b/setup.py index 0346ed3..673123b 100755 --- a/setup.py +++ b/setup.py @@ -12,8 +12,9 @@ # implied. # See the License for the specific language governing permissions and # limitations under the License. - # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT +from __future__ import annotations + import setuptools setuptools.setup(setup_requires=["pbr"], pbr=True) diff --git a/staffeln/__init__.py b/staffeln/__init__.py index 5612b0d..78fc3f2 100755 --- a/staffeln/__init__.py +++ b/staffeln/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -11,6 +10,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
+from __future__ import annotations import pbr.version diff --git a/staffeln/api/app.py b/staffeln/api/app.py index bf1234e..b19b9b8 100755 --- a/staffeln/api/app.py +++ b/staffeln/api/app.py @@ -1,7 +1,12 @@ -from flask import Flask, Response, request +from __future__ import annotations + +from flask import Flask +from flask import request +from flask import Response from oslo_log import log -from staffeln import objects + from staffeln.common import context +from staffeln import objects ctx = context.make_context() app = Flask(__name__) @@ -22,8 +27,8 @@ def backup_id(): backup = objects.Volume.get_backup_by_backup_id( # pylint: disable=E1120 context=ctx, backup_id=request.args["backup_id"] ) - # backup_info is None when there is no entry of the backup id in backup_table. - # So the backup should not be the automated backup. + # backup_info is None when there is no entry of the backup id in + # backup_table. So the backup should not be the automated backup. if backup is None: return Response( "True", diff --git a/staffeln/api/middleware/parsable_error.py b/staffeln/api/middleware/parsable_error.py index 2b49f83..2443201 100755 --- a/staffeln/api/middleware/parsable_error.py +++ b/staffeln/api/middleware/parsable_error.py @@ -17,8 +17,10 @@ Based on pecan.middleware.errordocument """ +from __future__ import annotations from oslo_serialization import jsonutils + from staffeln.i18n import _ @@ -78,7 +80,10 @@ def replacement_start_response(status, headers, exc_info=None): state["status_code"] = status_code except (ValueError, TypeError): # pragma: nocover raise Exception( - _("ErrorDocumentMiddleware received an invalid " "status %s") + _( + "ErrorDocumentMiddleware received an invalid " + "status %s" + ) % status ) else: diff --git a/staffeln/api/wsgi.py b/staffeln/api/wsgi.py index bef4092..6965beb 100755 --- a/staffeln/api/wsgi.py +++ b/staffeln/api/wsgi.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import app if __name__ == "__main__": diff --git a/staffeln/cmd/api.py b/staffeln/cmd/api.py index a46656c..ba2ce09 100755 --- a/staffeln/cmd/api.py +++ b/staffeln/cmd/api.py @@ -1,11 +1,15 @@ """Starter script for Staffeln API service""" + +from __future__ import annotations + import os import sys -import staffeln.conf from oslo_log import log as logging + from staffeln.api import app as api_app from staffeln.common import service +import staffeln.conf from staffeln.i18n import _ CONF = staffeln.conf.CONF diff --git a/staffeln/cmd/conductor.py b/staffeln/cmd/conductor.py index f4c9579..3b2b84e 100755 --- a/staffeln/cmd/conductor.py +++ b/staffeln/cmd/conductor.py @@ -1,10 +1,13 @@ """Starter script for the staffeln conductor service.""" +from __future__ import annotations + import cotyledon -import staffeln.conf from cotyledon import oslo_config_glue + from staffeln.common import service from staffeln.conductor import manager +import staffeln.conf CONF = staffeln.conf.CONF @@ -13,9 +16,15 @@ def main(): service.prepare_service() sm = cotyledon.ServiceManager() - sm.add(manager.BackupManager, workers=CONF.conductor.backup_workers, args=(CONF,)) sm.add( - manager.RotationManager, workers=CONF.conductor.rotation_workers, args=(CONF,) + manager.BackupManager, + workers=CONF.conductor.backup_workers, + args=(CONF,), + ) + sm.add( + manager.RotationManager, + workers=CONF.conductor.rotation_workers, + args=(CONF,), ) oslo_config_glue.setup(sm, CONF) sm.run() diff --git a/staffeln/cmd/dbmanage.py b/staffeln/cmd/dbmanage.py index 5801a36..433b7d7 100644 --- a/staffeln/cmd/dbmanage.py +++ 
b/staffeln/cmd/dbmanage.py @@ -2,11 +2,14 @@ Run storage database migration. """ +from __future__ import annotations + import sys from oslo_config import cfg -from staffeln import conf + from staffeln.common import service +from staffeln import conf from staffeln.db import migration CONF = conf.CONF @@ -25,17 +28,22 @@ def do_upgrade(): def add_command_parsers(subparsers): parser = subparsers.add_parser( - "create_schema", help="Create the database schema.") + "create_schema", help="Create the database schema." + ) parser.set_defaults(func=DBCommand.create_schema) parser = subparsers.add_parser( - "upgrade", help="Upgrade the database schema.") + "upgrade", help="Upgrade the database schema." + ) parser.add_argument("revision", nargs="?") parser.set_defaults(func=DBCommand.do_upgrade) command_opt = cfg.SubCommandOpt( - "command", title="Command", help="Available commands", handler=add_command_parsers + "command", + title="Command", + help="Available commands", + handler=add_command_parsers, ) diff --git a/staffeln/common/auth.py b/staffeln/common/auth.py index e23ef71..b64c515 100755 --- a/staffeln/common/auth.py +++ b/staffeln/common/auth.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import openstack diff --git a/staffeln/common/config.py b/staffeln/common/config.py index f71a378..5b69fb4 100755 --- a/staffeln/common/config.py +++ b/staffeln/common/config.py @@ -1,4 +1,6 @@ # from staffeln.common import rpc +from __future__ import annotations + import staffeln.conf from staffeln import version diff --git a/staffeln/common/constants.py b/staffeln/common/constants.py index b7d6d09..d065966 100644 --- a/staffeln/common/constants.py +++ b/staffeln/common/constants.py @@ -1,3 +1,5 @@ +from __future__ import annotations + BACKUP_INIT = 4 BACKUP_FAILED = 3 BACKUP_COMPLETED = 2 diff --git a/staffeln/common/context.py b/staffeln/common/context.py index c6046e1..d789a58 100644 --- a/staffeln/common/context.py +++ b/staffeln/common/context.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from oslo_context import context from oslo_log import log @@ -5,7 +7,11 @@ class RequestContext(context.RequestContext): - """Added security context with request parameters from openstack common library""" + """Added security context + + Added security context with request + parameters from openstack common library + """ def __init__( self, @@ -14,7 +20,7 @@ def __init__( instance_id=None, executed_at=None, backup_status=None, - **kwargs + **kwargs, ): self.backup_id = backup_id self.volume_id = volume_id diff --git a/staffeln/common/email.py b/staffeln/common/email.py index cf6e937..79d7225 100644 --- a/staffeln/common/email.py +++ b/staffeln/common/email.py @@ -1,10 +1,12 @@ """ Email module with SMTP""" -import smtplib -from email import utils +from __future__ import annotations + from email.header import Header from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText +from email import utils +import smtplib from oslo_log import log @@ -32,10 +34,12 @@ def send(smtp_profile): try: smtp_obj = smtplib.SMTP( - smtp_profile["smtp_server_domain"], smtp_profile["smtp_server_port"] + smtp_profile["smtp_server_domain"], + smtp_profile["smtp_server_port"], ) smtp_obj.connect( - smtp_profile["smtp_server_domain"], smtp_profile["smtp_server_port"] + smtp_profile["smtp_server_domain"], + smtp_profile["smtp_server_port"], ) smtp_obj.ehlo() smtp_obj.starttls() @@ -43,7 +47,9 @@ def send(smtp_profile): # SMTP Login smtp_obj.login(smtp_profile["src_email"], smtp_profile["src_pwd"]) 
smtp_obj.sendmail( - smtp_profile["src_email"], smtp_profile["dest_email"], msg.as_string() + smtp_profile["src_email"], + smtp_profile["dest_email"], + msg.as_string(), ) # Email Sent except smtplib.SMTPException as error: diff --git a/staffeln/common/lock.py b/staffeln/common/lock.py index 4c05626..5f21bf6 100644 --- a/staffeln/common/lock.py +++ b/staffeln/common/lock.py @@ -1,16 +1,20 @@ +from __future__ import annotations + import errno import glob import os import re import sys -import uuid from typing import Optional # noqa: H301 +import uuid -import sherlock from oslo_log import log -from staffeln import conf, exception +import sherlock from tooz import coordination +from staffeln import conf +from staffeln import exception + CONF = conf.CONF LOG = log.getLogger(__name__) @@ -146,7 +150,10 @@ class K8sCoordinator(object): """ def __init__( - self, expire: int = 3600, timeout: int = 10, namespace: str = "openstack" + self, + expire: int = 3600, + timeout: int = 10, + namespace: str = "openstack", ): self.timeout = timeout self.expire = expire diff --git a/staffeln/common/openstack.py b/staffeln/common/openstack.py index 9f412e8..ea566e3 100644 --- a/staffeln/common/openstack.py +++ b/staffeln/common/openstack.py @@ -1,5 +1,10 @@ -from openstack import exceptions, proxy +from __future__ import annotations + +from openstack import exceptions +from openstack import proxy from oslo_log import log +import tenacity + from staffeln.common import auth from staffeln.i18n import _ @@ -16,7 +21,9 @@ def set_project(self, project): project_id = project.get("id") if project_id not in self.conn_list: - LOG.debug(_("Initiate connection for project %s" % project.get("name"))) + LOG.debug( + _("Initiate connection for project %s" % project.get("name")) + ) conn = self.conn.connect_as_project(project) self.conn_list[project_id] = conn LOG.debug(_("Connect as project %s" % project.get("name"))) @@ -27,10 +34,14 @@ def get_user_id(self): user_name = self.conn.config.auth["username"] if "user_domain_id" in self.conn.config.auth: domain_id = self.conn.config.auth["user_domain_id"] - user = self.conn.get_user(name_or_id=user_name, domain_id=domain_id) + user = self.conn.get_user( + name_or_id=user_name, domain_id=domain_id + ) elif "user_domain_name" in self.conn.config.auth: domain_name = self.conn.config.auth["user_domain_name"] - user = self.conn.get_user(name_or_id=user_name, domain_id=domain_name) + user = self.conn.get_user( + name_or_id=user_name, domain_id=domain_name + ) else: user = self.conn.get_user(name_or_id=user_name) return user.id @@ -66,19 +77,19 @@ def get_projects(self): def get_servers(self, project_id=None, all_projects=True, details=True): if project_id is not None: return self.conn.compute.servers( - details=details, all_projects=all_projects, project_id=project_id + details=details, + all_projects=all_projects, + project_id=project_id, ) else: - return self.conn.compute.servers(details=details, all_projects=all_projects) + return self.conn.compute.servers( + details=details, all_projects=all_projects + ) def get_volume(self, uuid, project_id): return self.conn.get_volume_by_id(uuid) def get_backup(self, uuid, project_id=None): - # return conn.block_storage.get_backup( - # project_id=project_id, backup_id=uuid, - # ) - # conn.block_storage.backups(volume_id=uuid,project_id=project_id) try: return self.conn.get_volume_backup(uuid) except exceptions.ResourceNotFound: @@ -93,9 +104,6 @@ def create_backup( name=None, incremental=False, ): - # return conn.block_storage.create_backup( - 
# volume_id=queue.volume_id, force=True, project_id=queue.project_id, name="name" - # ) return self.conn.create_volume_backup( volume_id=volume_id, force=force, @@ -112,7 +120,8 @@ def delete_backup(self, uuid, project_id=None, force=False): LOG.debug(f"Start deleting backup {uuid} in OpenStack.") try: self.conn.delete_volume_backup(uuid, force=force) - # TODO(Alex): After delete the backup generator, need to set the volume status again + # TODO(Alex): After delete the backup generator, + # need to set the volume status again except exceptions.ResourceNotFound: return None @@ -128,7 +137,8 @@ def get_backup_gigabytes_quota(self, project_id): # rewrite openstasdk._block_storage.get_volume_quotas # added usage flag - # ref: https://docs.openstack.org/api-ref/block-storage/v3/?expanded=#show-quota-usage-for-a-project + # ref: https://docs.openstack.org/api-ref/block-storage/v3/? + # expanded=#show-quota-usage-for-a-project def _get_volume_quotas(self, project_id, usage=True): """Get volume quotas for a project @@ -140,11 +150,15 @@ def _get_volume_quotas(self, project_id, usage=True): if usage: resp = self.conn.block_storage.get( - "/os-quota-sets/{project_id}?usage=True".format(project_id=project_id) + "/os-quota-sets/{project_id}?usage=True".format( + project_id=project_id + ) ) else: resp = self.conn.block_storage.get( "/os-quota-sets/{project_id}".format(project_id=project_id) ) - data = proxy._json_response(resp, error_message="cinder client call failed") + data = proxy._json_response( + resp, error_message="cinder client call failed" + ) return self.conn._get_and_munchify("quota_set", data) diff --git a/staffeln/common/service.py b/staffeln/common/service.py index d2ad7a5..c657896 100755 --- a/staffeln/common/service.py +++ b/staffeln/common/service.py @@ -11,11 +11,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations -import staffeln.conf from oslo_log import log as logging -from staffeln import objects + from staffeln.common import config +import staffeln.conf +from staffeln import objects CONF = staffeln.conf.CONF diff --git a/staffeln/common/short_id.py b/staffeln/common/short_id.py index 18be04c..e182ad1 100755 --- a/staffeln/common/short_id.py +++ b/staffeln/common/short_id.py @@ -2,10 +2,13 @@ The IDs each comprise 12 (lower-case) alphanumeric characters. """ +from __future__ import annotations + import base64 import uuid import six + from staffeln.i18n import _ diff --git a/staffeln/common/time.py b/staffeln/common/time.py index 103096a..79ef345 100644 --- a/staffeln/common/time.py +++ b/staffeln/common/time.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import re from dateutil.relativedelta import relativedelta @@ -6,8 +8,10 @@ DEFAULT_TIME_FORMAT = "%Y-%m-%d %H:%M:%S" regex = re.compile( - r"((?P\d+?)y)?((?P\d+?)mon)?((?P\d+?)w)?((?P\d+?)d)?" - r"((?P\d+?)h)?((?P\d+?)min)?((?P\d+?)s)?" + r"((?P\d+?)y)?((?P\d+?)mon)?" + r"((?P\d+?)w)?((?P\d+?)d)?" + r"((?P\d+?)h)?((?P\d+?)min)?" + r"((?P\d+?)s)?" 
) @@ -45,7 +49,14 @@ def get_current_strtime(): def timeago( - years=0, months=0, weeks=0, days=0, hours=0, minutes=0, seconds=0, from_date=None + years=0, + months=0, + weeks=0, + days=0, + hours=0, + minutes=0, + seconds=0, + from_date=None, ): if from_date is None: from_date = timeutils.utcnow() diff --git a/staffeln/conductor/backup.py b/staffeln/conductor/backup.py index 8375bb5..559d782 100755 --- a/staffeln/conductor/backup.py +++ b/staffeln/conductor/backup.py @@ -1,17 +1,23 @@ +from __future__ import annotations + import collections -from datetime import timedelta, timezone +from datetime import timedelta +from datetime import timezone -import staffeln.conf from openstack.exceptions import HttpException as OpenstackHttpException from openstack.exceptions import ResourceNotFound as OpenstackResourceNotFound from openstack.exceptions import SDKException as OpenstackSDKException from oslo_log import log from oslo_utils import timeutils -from staffeln import objects -from staffeln.common import constants, context, openstack + +from staffeln.common import constants +from staffeln.common import context +from staffeln.common import openstack from staffeln.common import time as xtime from staffeln.conductor import result +import staffeln.conf from staffeln.i18n import _ +from staffeln import objects CONF = staffeln.conf.CONF LOG = log.getLogger(__name__) @@ -115,8 +121,7 @@ def get_queue_task_by_id(self, task_id): return queue def create_queue(self, old_tasks): - """ - Create the queue of all the volumes for backup + """Create the queue of all the volumes for backup :param old_tasks: Task list not completed in the previous cycle :type: List @@ -129,7 +134,8 @@ def create_queue(self, old_tasks): for old_task in old_tasks: old_task_volume_list.append(old_task.volume_id) - # 2. add new tasks in the queue which are not existing in the old task list + # 2. add new tasks in the queue which are not existing in the old task + # list task_list = self.check_instance_volumes() for task in task_list: if task.volume_id not in old_task_volume_list: @@ -142,8 +148,8 @@ def filter_by_server_metadata(self, metadata): return False return ( - metadata[CONF.conductor.backup_metadata_key].lower() - == constants.BACKUP_ENABLED_KEY + metadata[CONF.conductor.backup_metadata_key].lower( + ) == constants.BACKUP_ENABLED_KEY ) else: return True @@ -157,9 +163,9 @@ def filter_by_volume_status(self, volume_id, project_id): res = volume["status"] in ("available", "in-use") if not res: reason = _( - "Volume %s is not triger new backup task because it is in %s status" - % (volume_id, volume["status"]) - ) + "Volume %s is not triger new backup task because " + "it is in %s status" % + (volume_id, volume["status"])) LOG.info(reason) return reason return res @@ -169,7 +175,7 @@ def filter_by_volume_status(self, volume_id, project_id): def purge_backups(self, project_id=None): LOG.info(f"Start pruge backup tasks for project {project_id}") - # TODO make all this in a single DB command + # We can consider make all these in a single DB command success_tasks = self.get_queues( filters={ "backup_status": constants.BACKUP_COMPLETED, @@ -273,7 +279,9 @@ def soft_remove_backup_task(self, backup_object): ) except OpenstackSDKException as e: - LOG.warn(f"Backup {backup_object.backup_id} deletion failed. {str(e)}") + LOG.warn( + f"Backup {backup_object.backup_id} deletion failed. 
{str(e)}" + ) # We don't delete backup object if any exception occured # backup_object.delete_backup() return False @@ -286,11 +294,11 @@ def hard_remove_volume_backup(self, backup_object, skip_inc_err=False): LOG.warn( f"Project {project_id} for backup " f"{backup_object.backup_id} is not existing in " - "Openstack. Please check your access right to this project. " - "Skip this backup from remove now and will retry later." - ) - # Don't remove backup object, keep it and retry on next periodic task - # backup_object.delete_backup() + "Openstack. Please check your access right to this " + "project. " + "Skip this backup from remove now and will retry later.") + # Don't remove backup object, keep it and retry on next + # periodic task backup_object.delete_backup() return self.openstacksdk.set_project(self.project_list[project_id]) @@ -299,17 +307,19 @@ def hard_remove_volume_backup(self, backup_object, skip_inc_err=False): ) if backup is None: LOG.info( - f"Backup {backup_object.backup_id} is removed from Openstack " - "or cinder-backup is not existing in the cloud. " - "Start removing backup object from Staffeln." - ) + f"Backup {backup_object.backup_id} is removed from " + "Openstack or cinder-backup is not existing in the " + "cloud. Start removing backup object from Staffeln.") return backup_object.delete_backup() self.openstacksdk.delete_backup(uuid=backup_object.backup_id) # Don't remove backup until it's officially removed from Cinder # backup_object.delete_backup() except Exception as e: - if skip_inc_err and "Incremental backups exist for this backup" in str(e): + if ( + skip_inc_err and ( + "Incremental backups exist for this backup" in str(e)) + ): LOG.debug(str(e)) else: LOG.info( @@ -318,8 +328,8 @@ def hard_remove_volume_backup(self, backup_object, skip_inc_err=False): ) LOG.debug(f"deletion failed {str(e)}") - # Don't remove backup object, keep it and retry on next periodic task - # backup_object.delete_backup() + # Don't remove backup object, keep it and retry on next + # periodic task backup_object.delete_backup() def update_project_list(self): projects = self.openstacksdk.get_projects() @@ -327,8 +337,7 @@ def update_project_list(self): self.project_list[project.id] = project def _is_backup_required(self, volume_id): - """ - Decide if the backup required based on the backup history + """Decide if the backup required based on the backup history If there is any backup created during certain time, will not trigger new backup request. @@ -346,11 +355,15 @@ def _is_backup_required(self, volume_id): # Ignore backup interval return True interval = CONF.conductor.backup_min_interval - threshold_strtime = timeutils.utcnow() - timedelta(seconds=interval) + threshold_strtime = timeutils.utcnow() - timedelta( + seconds=interval + ) backups = self.get_backups( filters={ "volume_id__eq": volume_id, - "created_at__gt": threshold_strtime.astimezone(timezone.utc), + "created_at__gt": threshold_strtime.astimezone( + timezone.utc + ), } ) if backups: @@ -365,8 +378,7 @@ def _is_backup_required(self, volume_id): return True def _is_incremental(self, volume_id): - """ - Decide the backup method based on the backup history + """Decide the backup method based on the backup history It queries to select the last N backups from backup table and decide backup type as full if there is no full backup. @@ -395,16 +407,13 @@ def _is_incremental(self, volume_id): return True except Exception as e: LOG.debug( - _( - "Failed to get backup history to decide backup method. 
Reason: %s" - % str(e) - ) + "Failed to get backup history to decide backup " + f"method. Reason: {e}" ) return False def check_instance_volumes(self): - """ - Retrieves volume list to backup + """Retrieves volume list to backup Get the list of all the volumes from the project using openstacksdk. Function first list all the servers in the project and get the volumes @@ -422,10 +431,8 @@ def check_instance_volumes(self): servers = self.openstacksdk.get_servers(project_id=project.id) except OpenstackHttpException as ex: LOG.warn( - _( - "Failed to list servers in project %s. %s" - % (project.id, str(ex)) - ) + f"Failed to list servers in project {project.id}. " + f"{str(ex)} (status code: {ex.status_code})." ) continue for server in servers: @@ -490,8 +497,12 @@ def collect_instance_retention_map(self): try: servers = self.openstacksdk.get_servers(all_projects=True) - except OpenstackHttpException: - LOG.warn(_("Failed to list servers for all projects.")) + except OpenstackHttpException as ex: + servers = [] + LOG.warn( + f"Failed to list servers for all projects. " + f"{str(ex)} (status code: {ex.status_code})." + ) for server in servers: if CONF.conductor.retention_metadata_key in server.metadata: @@ -500,21 +511,20 @@ def collect_instance_retention_map(self): ].lower() if xtime.regex.fullmatch(server_retention_time): LOG.debug( - f"Found retention time ({server_retention_time}) defined for " - f"server {server.id}, Adding it retention reference map." - ) + f"Found retention time ({server_retention_time}) " + f"defined for server {server.id}, " + "Adding it retention reference map.") retention_map[server.id] = server_retention_time else: LOG.info( - f"Server retention time for instance {server.id} is incorrect. " - "Please follow 'ymwd" - "hmins' format." - ) + f"Server retention time for instance {server.id} is " + "incorrect. Please follow " + "'ymwd" + "hmins' format.") return retention_map def _volume_queue(self, task): - """ - Commits one backup task to queue table + """Commits one backup task to queue table :param task: One backup task :type: QueueMapping @@ -528,7 +538,8 @@ def _volume_queue(self, task): volume_queue.instance_name = task.instance_name volume_queue.volume_name = task.volume_name # NOTE(Oleks): Backup mode is inherited from backup service. - # Need to keep and navigate backup mode history, to decide a different mode per volume + # Need to keep and navigate backup mode history, to decide a different + # mode per volume volume_queue.incremental = task.incremental backup_method = "Incremental" if task.incremental else "Full" @@ -542,6 +553,7 @@ def _volume_queue(self, task): def create_volume_backup(self, task): """Initiate the backup of the volume + :param task: Provide the map of the volume that needs backup. 
This function will call the backupup api and change the @@ -563,7 +575,10 @@ def create_volume_backup(self, task): # NOTE(Alex): no need to wait because we have a cycle time out if project_id not in self.project_list: LOG.warn( - _("Project ID %s is not existing in project list" % project_id) + _( + "Project ID %s is not existing in project list" + % project_id + ) ) self.process_non_existing_backup(task) return @@ -571,8 +586,16 @@ def create_volume_backup(self, task): backup_method = "Incremental" if task.incremental else "Full" LOG.info( _( - ("%s Backup (name: %s) for volume %s creating in project %s") - % (backup_method, backup_name, task.volume_id, project_id) + ( + "%s Backup (name: %s) for volume %s creating " + "in project %s" + ) + % ( + backup_method, + backup_name, + task.volume_id, + project_id, + ) ) ) volume_backup = self.openstacksdk.create_backup( @@ -585,27 +608,31 @@ def create_volume_backup(self, task): task.backup_status = constants.BACKUP_WIP task.save() except OpenstackSDKException as error: - inc_err_msg = "No backups available to do an incremental backup" + inc_err_msg = ( + "No backups available to do an incremental backup" + ) if inc_err_msg in str(error): LOG.info( - "Retry to create full backup for volume %s instead of incremental." - % task.volume_id - ) + "Retry to create full backup for volume %s instead of " + "incremental." % + task.volume_id) task.incremental = False task.save() else: reason = _( - "Backup (name: %s) creation for the volume %s failled. %s" - % (backup_name, task.volume_id, str(error)[:64]) - ) + "Backup (name: %s) creation for the volume %s " + "failled. %s" % + (backup_name, task.volume_id, str(error)[ + :64])) LOG.warn( - "Backup (name: %s) creation for the volume %s failled. %s" - % (backup_name, task.volume_id, str(error)) - ) + "Backup (name: %s) creation for the volume %s " + "failled. %s" % + (backup_name, task.volume_id, str(error))) task.reason = reason task.backup_status = constants.BACKUP_FAILED task.save() - # Added extra exception as OpenstackSDKException does not handle the keystone unauthourized issue. + # Added extra exception as OpenstackSDKException does not handle + # the keystone unauthourized issue. except Exception as error: reason = _( "Backup (name: %s) creation for the volume %s failled. %s" @@ -627,7 +654,8 @@ def create_volume_backup(self, task): def process_pre_failed_backup(self, task): # 1.notify via email reason = _( - "The backup creation for the volume %s was prefailed." % task.volume_id + "The backup creation for the volume %s was prefailed." + % task.volume_id ) LOG.warn(reason) task.reason = reason @@ -636,7 +664,9 @@ def process_pre_failed_backup(self, task): def process_failed_backup(self, task): # 1. notify via email - reason = _("The status of backup for the volume %s is error." % task.volume_id) + reason = ( + f"The status of backup for the volume {task.volume_id} is error." + ) LOG.warn(reason) # 2. delete backup generator try: @@ -679,6 +709,7 @@ def process_using_backup(self, task): def check_volume_backup_status(self, queue): """Checks the backup status of the volume + :params: queue: Provide the map of the volume that needs backup status checked. Call the backups api to see if the backup is successful. @@ -698,7 +729,10 @@ def check_volume_backup_status(self, queue): if backup_gen is None: # TODO(Alex): need to check when it is none LOG.info( - _("[Beta] Backup status of %s is returning none." % (queue.backup_id)) + _( + "[Beta] Backup status of %s is returning none." 
+ % (queue.backup_id) + ) ) self.process_non_existing_backup(queue) return @@ -707,13 +741,16 @@ def check_volume_backup_status(self, queue): elif backup_gen.status == "available": self.process_available_backup(queue) elif backup_gen.status == "creating": - LOG.info("Waiting for backup of %s to be completed" % queue.volume_id) + LOG.info( + "Waiting for backup of %s to be completed" % queue.volume_id + ) else: # "deleting", "restoring", "error_restoring" status self.process_using_backup(queue) def _volume_backup(self, task): # matching_backups = [ - # g for g in self.available_backups if g.backup_id == task.backup_id + # g for g in self.available_backups + # if g.backup_id == task.backup_id # ] # if not matching_backups: volume_backup = objects.Volume(self.ctx) diff --git a/staffeln/conductor/manager.py b/staffeln/conductor/manager.py index 380c2f7..1d96d52 100755 --- a/staffeln/conductor/manager.py +++ b/staffeln/conductor/manager.py @@ -1,17 +1,23 @@ +from __future__ import annotations + +from datetime import timedelta +from datetime import timezone import threading import time -from datetime import timedelta, timezone import cotyledon -import staffeln.conf from futurist import periodics from oslo_log import log from oslo_utils import timeutils -from staffeln import objects -from staffeln.common import constants, context, lock + +from staffeln.common import constants +from staffeln.common import context +from staffeln.common import lock from staffeln.common import time as xtime from staffeln.conductor import backup as backup_controller +import staffeln.conf from staffeln.i18n import _ +from staffeln import objects LOG = log.getLogger(__name__) CONF = staffeln.conf.CONF @@ -58,7 +64,8 @@ def _process_wip_tasks(self): LOG.info(_("cycle timein")) for queue in queues_started: LOG.debug( - f"try to get lock and run task for volume: {queue.volume_id}." + "try to get lock and run task for volume: " + f"{queue.volume_id}." ) with lock.Lock( self.lock_mgt, queue.volume_id, remove_lock=True @@ -82,7 +89,8 @@ def _backup_cycle_timeout(self): LOG.info( _( "Recycle timeout format is invalid. " - "Follow ymwdhmins." + "Follow ymwdh" + "mins." 
) ) time_delta_dict = xtime.parse_timedelta_string( @@ -117,7 +125,9 @@ def _process_todo_tasks(self): ) as t_lock: if t_lock.acquired: # Re-pulling status and make it's up-to-date - task = self.controller.get_queue_task_by_id(task_id=task.id) + task = self.controller.get_queue_task_by_id( + task_id=task.id + ) if task.backup_status == constants.BACKUP_PLANNED: task.backup_status = constants.BACKUP_INIT task.save() @@ -136,9 +146,13 @@ def _update_task_queue(self): def _report_backup_result(self): report_period = CONF.conductor.report_period - threshold_strtime = timeutils.utcnow() - timedelta(seconds=report_period) + threshold_strtime = timeutils.utcnow() - timedelta( + seconds=report_period + ) - filters = {"created_at__gt": threshold_strtime.astimezone(timezone.utc)} + filters = { + "created_at__gt": threshold_strtime.astimezone(timezone.utc) + } report_tss = objects.ReportTimestamp.list( # pylint: disable=E1120 context=self.ctx, filters=filters ) @@ -152,9 +166,13 @@ def _report_backup_result(self): threshold_strtime = timeutils.utcnow() - timedelta( seconds=report_period * 10 ) - filters = {"created_at__lt": threshold_strtime.astimezone(timezone.utc)} - old_report_tss = objects.ReportTimestamp.list( # pylint: disable=E1120 - context=self.ctx, filters=filters + filters = { + "created_at__lt": threshold_strtime.astimezone(timezone.utc) + } + old_report_tss = ( + objects.ReportTimestamp.list( # pylint: disable=E1120 + context=self.ctx, filters=filters + ) ) for report_ts in old_report_tss: report_ts.delete() @@ -163,7 +181,9 @@ def backup_engine(self, backup_service_period): LOG.info("Backup manager started %s" % str(time.time())) LOG.info("%s periodics" % self.name) - @periodics.periodic(spacing=backup_service_period, run_immediately=True) + @periodics.periodic( + spacing=backup_service_period, run_immediately=True + ) def backup_tasks(): with self.lock_mgt: with lock.Lock(self.lock_mgt, constants.PULLER) as puller: @@ -230,14 +250,18 @@ def is_retention(self, backup): if backup_age > retention_time: # Backup remain longer than retention, need to purge it. LOG.debug( - f"Found potential volume backup for retention: Backup ID: {backup.backup_id} " - f"with backup age: {backup_age} (Target retention time: {retention_time})." + "Found potential volume backup for retention: Backup " + f"ID: {backup.backup_id} " + f"with backup age: {backup_age} (Target retention " + f"time: {retention_time})." ) return True elif now - self.threshold_strtime < backup_age: LOG.debug( - f"Found potential volume backup for retention: Backup ID: {backup.backup_id} " - f"with backup age: {backup_age} (Default retention time: {self.threshold_strtime})." + "Found potential volume backup for retention: " + f"Backup ID: {backup.backup_id} " + f"with backup age: {backup_age} (Default retention " + f"time: {self.threshold_strtime})." 
) return True return False @@ -245,10 +269,14 @@ def is_retention(self, backup): def rotation_engine(self, retention_service_period): LOG.info(f"{self.name} rotation_engine") - @periodics.periodic(spacing=retention_service_period, run_immediately=True) + @periodics.periodic( + spacing=retention_service_period, run_immediately=True + ) def rotation_tasks(): with self.lock_mgt: - with lock.Lock(self.lock_mgt, constants.RETENTION) as retention: + with lock.Lock( + self.lock_mgt, constants.RETENTION + ) as retention: if not retention.acquired: return @@ -264,8 +292,8 @@ def rotation_tasks(): # No way to judge retention if ( - self.threshold_strtime is None - and not self.instance_retention_map + self.threshold_strtime is None and ( + not self.instance_retention_map) ): return backup_instance_map = {} @@ -274,17 +302,21 @@ def rotation_tasks(): self.controller.update_project_list() for backup in self.get_backup_list(): - # Create backup instance map for later sorted by created_at. - # This can be use as base of judgement on delete a backup. - # The reason we need such list is because backup have - # dependency with each other after we enable incremental backup. + # Create backup instance map for later sorted by + # created_at. This can be use as base of judgement + # on delete a backup. The reason we need such list + # is because backup have dependency with each other + # after we enable incremental backup. # So we need to have information to judge on. if backup.instance_id in backup_instance_map: - backup_instance_map[backup.instance_id].append(backup) + backup_instance_map[backup.instance_id].append( + backup + ) else: backup_instance_map[backup.instance_id] = [backup] - # Sort backup instance map and use it to check backup create time and order. + # Sort backup instance map and use it to check backup + # create time and order. for instance_id in backup_instance_map: sorted_backup_list = sorted( backup_instance_map[instance_id], @@ -294,9 +326,11 @@ def rotation_tasks(): for backup in sorted_backup_list: if self.is_retention(backup): LOG.debug( - f"Retention: Try to remove volume backup {backup.backup_id}" + "Retention: Try to remove volume backup " + f"{backup.backup_id}" ) - # Try to delete and skip any incremental exist error. + # Try to delete and skip any incremental + # exist error. self.controller.hard_remove_volume_backup( backup, skip_inc_err=True ) @@ -319,7 +353,8 @@ def get_time_from_str(self, time_str, to_str=False): LOG.info( _( "Retention time format is invalid. " - "Follow ymwdhmins." + "Follow ymwdh" + "mins." 
)
             )
         )
         return None
diff --git a/staffeln/conductor/result.py b/staffeln/conductor/result.py
index be07aff..c602ae3 100644
--- a/staffeln/conductor/result.py
+++ b/staffeln/conductor/result.py
@@ -1,12 +1,15 @@
 # Email notification package
 # This should be upgraded by integrating with mail server to send batch
-import staffeln.conf
+from __future__ import annotations
+
 from oslo_log import log
 from oslo_utils import timeutils
-from staffeln import objects
-from staffeln.common import constants, email
+
+from staffeln.common import constants
+from staffeln.common import email
 from staffeln.common import time as xtime
-from staffeln.i18n import _
+import staffeln.conf
+from staffeln import objects
 
 CONF = staffeln.conf.CONF
 LOG = log.getLogger(__name__)
@@ -37,21 +40,23 @@ def send_result_email(self, project_id, subject=None, project_name=None):
             receiver = CONF.notification.receiver
         elif not CONF.notification.project_receiver_domain:
             try:
-                receiver = self.backup_mgt.openstacksdk.get_project_member_emails(
-                    project_id
+                receiver = (
+                    self.backup_mgt.openstacksdk.get_project_member_emails(
+                        project_id
+                    )
                 )
                 if not receiver:
                     LOG.warn(
-                        f"No email can be found from members of project {project_id}. "
-                        "Skip report now and will try to report later."
-                    )
+                        "No email can be found from members of project "
+                        f"{project_id}. "
+                        "Skip report now and will try to report later.")
                     return False
             except Exception as ex:
                 LOG.warn(
-                    f"Failed to fetch emails from project members with exception: {str(ex)} "
+                    "Failed to fetch emails from project members with "
+                    f"exception: {str(ex)} "
                     "As also no receiver email or project receiver domain are "
-                    "configured. Will try to report later."
-                )
+                    "configured. Will try to report later.")
                 return False
         else:
             receiver_domain = CONF.notification.project_receiver_domain
@@ -119,41 +124,35 @@ def publish(self, project_id=None, project_name=None):
         if success_tasks:
             success_volumes = "<br>".join(
".join( [ - ( - f"Volume ID: {str(e.volume_id)}, Backup ID: {str(e.backup_id)}, " - f"Backup mode: {'Incremental' if e.incremental else 'Full'}, " - f"Created at: {str(e.created_at)}, Last updated at: " - f"{str(e.updated_at)}" - ) - for e in success_tasks - ] - ) + (f"Volume ID: {str(e.volume_id)}, " + f"Backup ID: {str(e.backup_id)}, " + "Backup mode: " + f"{'Incremental' if e.incremental else 'Full'}, " + f"Created at: {str(e.created_at)}, Last updated at: " + f"{str(e.updated_at)}") for e in success_tasks]) else: success_volumes = "
" if failed_tasks: failed_volumes = "
".join( [ - ( - f"Volume ID: {str(e.volume_id)}, Reason: {str(e.reason)}, " - f"Created at: {str(e.created_at)}, Last updated at: " - f"{str(e.updated_at)}" - ) - for e in failed_tasks - ] - ) + (f"Volume ID: {str(e.volume_id)}, " + f"Reason: {str(e.reason)}, " + f"Created at: {str(e.created_at)}, Last updated at: " + f"{str(e.updated_at)}") for e in failed_tasks]) else: failed_volumes = "
" html += ( f"

Project: {project_name} (ID: {project_id})

" "

Quota Usage (Backup Gigabytes)

" - f"

Limit: {str(quota['limit'])} GB, In Use: " - f"{str(quota['in_use'])} GB, Reserved: {str(quota['reserved'])} GB, Total " + f"

Limit: {str(quota['limit'])} " + "GB, In Use: " + f"{str(quota['in_use'])} GB, Reserved: {str(quota['reserved'])} " + "GB, Total " f"rate: {str(quota_usage)}

" "

Success List

" f"

{success_volumes}


" "

Failed List

" - f"

{failed_volumes}


" - ) + f"

{failed_volumes}


") self.content += html subject = f"Staffeln Backup result: {project_id}" reported = self.send_result_email( @@ -163,5 +162,4 @@ def publish(self, project_id=None, project_name=None): # Record success report self.create_report_record() return True - else: - return False + return False diff --git a/staffeln/conf/__init__.py b/staffeln/conf/__init__.py index 3289b63..4da72a5 100755 --- a/staffeln/conf/__init__.py +++ b/staffeln/conf/__init__.py @@ -1,5 +1,12 @@ +from __future__ import annotations + from oslo_config import cfg -from staffeln.conf import api, conductor, database, notify, paths + +from staffeln.conf import api +from staffeln.conf import conductor +from staffeln.conf import database +from staffeln.conf import notify +from staffeln.conf import paths CONF = cfg.CONF diff --git a/staffeln/conf/api.py b/staffeln/conf/api.py index e405d6a..16db057 100755 --- a/staffeln/conf/api.py +++ b/staffeln/conf/api.py @@ -1,4 +1,7 @@ +from __future__ import annotations + from oslo_config import cfg + from staffeln.i18n import _ api_group = cfg.OptGroup( @@ -16,7 +19,9 @@ cfg.PortOpt( "port", default=8808, - help=_("Staffeln API listens on this port number for incoming requests."), + help=_( + "Staffeln API listens on this port number for incoming requests." + ), ), cfg.BoolOpt("enabled_ssl", default=False, help=_("ssl enabled")), cfg.StrOpt("ssl_key_file", default=False, help=_("ssl key file path")), diff --git a/staffeln/conf/conductor.py b/staffeln/conf/conductor.py index ab8e258..db0f840 100755 --- a/staffeln/conf/conductor.py +++ b/staffeln/conf/conductor.py @@ -1,11 +1,17 @@ +from __future__ import annotations + from oslo_config import cfg + from staffeln.common import constants from staffeln.i18n import _ conductor_group = cfg.OptGroup( "conductor", title="Conductor Options", - help=_("Options under this group are used " "to define Conductor's configuration."), + help=_( + "Options under this group are used " + "to define Conductor's configuration." + ), ) backup_opts = [ @@ -43,7 +49,8 @@ "backup_cycle_timout", regex=( r"((?P\d+?)y)?((?P\d+?)mon)?((?P\d+?)w)?" - r"((?P\d+?)d)?((?P\d+?)h)?((?P\d+?)min)?((?P\d+?)s)?" + r"((?P\d+?)d)?((?P\d+?)h)?((?P\d+?)min)?" + r"((?P\d+?)s)?" ), default=constants.DEFAULT_BACKUP_CYCLE_TIMEOUT, help=_( @@ -53,12 +60,15 @@ ), cfg.StrOpt( "backup_metadata_key", - help=_("The key string of metadata the VM, which requres back up, has"), + help=_( + "The key string of metadata the VM, which requres back up, has" + ), ), cfg.StrOpt( "retention_metadata_key", help=_( - "The key string of metadata the VM, which use as backup retention period." + "The key string of metadata the VM, which use as backup retention " + "period." ), ), cfg.IntOpt( @@ -96,7 +106,8 @@ "retention_time", regex=( r"((?P\d+?)y)?((?P\d+?)mon)?((?P\d+?)w)?" - r"((?P\d+?)d)?((?P\d+?)h)?((?P\d+?)min)?((?P\d+?)s)?" + r"((?P\d+?)d)?((?P\d+?)h)?((?P\d+?)min)?" + r"((?P\d+?)s)?" ), default="2w3d", help=_( @@ -110,13 +121,18 @@ coordination_group = cfg.OptGroup( "coordination", title="Coordination Options", - help=_("Options under this group are used to define Coordination's configuration."), + help=_( + "Options under this group are used to define Coordination's" + "configuration." 
+ ), ) coordination_opts = [ cfg.StrOpt( - "backend_url", default="", help=_("lock coordination connection backend URL.") + "backend_url", + default="", + help=_("lock coordination connection backend URL."), ), ] diff --git a/staffeln/conf/database.py b/staffeln/conf/database.py index 761aa15..f4fe98e 100644 --- a/staffeln/conf/database.py +++ b/staffeln/conf/database.py @@ -1,5 +1,8 @@ +from __future__ import annotations + from oslo_config import cfg from oslo_db import options as oslo_db_options + from staffeln.conf import paths from staffeln.i18n import _ @@ -14,7 +17,9 @@ ) SQL_OPTS = [ - cfg.StrOpt("mysql_engine", default="InnoDB", help=_("MySQL engine to use.")), + cfg.StrOpt( + "mysql_engine", default="InnoDB", help=_("MySQL engine to use.") + ), ] diff --git a/staffeln/conf/notify.py b/staffeln/conf/notify.py index 21c67e8..bc3f4bf 100644 --- a/staffeln/conf/notify.py +++ b/staffeln/conf/notify.py @@ -1,10 +1,15 @@ +from __future__ import annotations + from oslo_config import cfg + from staffeln.i18n import _ notify_group = cfg.OptGroup( "notification", title="Notification options", - help=_("Options under this group are used to define notification settings."), + help=_( + "Options under this group are used to define notification settings." + ), ) email_opts = [ @@ -32,7 +37,8 @@ "The user name to authenticate with." ), ), - # We can remove the sender password as we are using postfix to send mail and we won't be authenticating. + # We can remove the sender password as we are using postfix to send + # mail and we won't be authenticating. cfg.StrOpt( "sender_pwd", help=_( diff --git a/staffeln/conf/paths.py b/staffeln/conf/paths.py index 7dbd9a1..7341e48 100644 --- a/staffeln/conf/paths.py +++ b/staffeln/conf/paths.py @@ -1,12 +1,17 @@ +from __future__ import annotations + import os from oslo_config import cfg + from staffeln.i18n import _ PATH_OPTS = [ cfg.StrOpt( "pybasedir", - default=os.path.abspath(os.path.join(os.path.dirname(__file__), "../")), + default=os.path.abspath( + os.path.join(os.path.dirname(__file__), "../") + ), help=_("Directory where the staffeln python module is installed."), ), cfg.StrOpt( diff --git a/staffeln/db/api.py b/staffeln/db/api.py index 2d10a05..3e22bde 100644 --- a/staffeln/db/api.py +++ b/staffeln/db/api.py @@ -1,9 +1,14 @@ """Base classes for storage engines""" + +from __future__ import annotations + from oslo_config import cfg from oslo_db import api as db_api _BACKEND_MAPPING = {"sqlalchemy": "staffeln.db.sqlalchemy.api"} -IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING, lazy=True) +IMPL = db_api.DBAPI.from_config( + cfg.CONF, backend_mapping=_BACKEND_MAPPING, lazy=True +) def get_instance(): diff --git a/staffeln/db/base.py b/staffeln/db/base.py index de0d7c3..ad303d1 100755 --- a/staffeln/db/base.py +++ b/staffeln/db/base.py @@ -1,5 +1,7 @@ """Database setup and migration commands.""" +from __future__ import annotations + class base: def __init__(self): diff --git a/staffeln/db/migration.py b/staffeln/db/migration.py index 113116e..0d6eea9 100644 --- a/staffeln/db/migration.py +++ b/staffeln/db/migration.py @@ -1,7 +1,11 @@ """Database setup command""" -import staffeln.conf + +from __future__ import annotations + from stevedore import driver +import staffeln.conf + CONF = staffeln.conf.CONF _IMPL = None diff --git a/staffeln/db/sqlalchemy/alembic/env.py b/staffeln/db/sqlalchemy/alembic/env.py index 71461fe..970dcc7 100644 --- a/staffeln/db/sqlalchemy/alembic/env.py +++ b/staffeln/db/sqlalchemy/alembic/env.py @@ -9,10 
+9,12 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +from __future__ import annotations from logging import config as log_config from alembic import context + from staffeln.db.sqlalchemy import api as sqla_api from staffeln.db.sqlalchemy import models @@ -44,7 +46,9 @@ def run_migrations_online(): """ engine = sqla_api.get_engine() with engine.connect() as connection: - context.configure(connection=connection, target_metadata=target_metadata) + context.configure( + connection=connection, target_metadata=target_metadata + ) with context.begin_transaction(): context.run_migrations() diff --git a/staffeln/db/sqlalchemy/alembic/versions/041d9a0f1159_backup_add_names.py b/staffeln/db/sqlalchemy/alembic/versions/041d9a0f1159_backup_add_names.py index a16f27c..6d53f0e 100644 --- a/staffeln/db/sqlalchemy/alembic/versions/041d9a0f1159_backup_add_names.py +++ b/staffeln/db/sqlalchemy/alembic/versions/041d9a0f1159_backup_add_names.py @@ -7,17 +7,21 @@ """ # revision identifiers, used by Alembic. +from __future__ import annotations + revision = "041d9a0f1159" down_revision = "" -import sqlalchemy as sa # noqa: E402 from alembic import op # noqa: E402 +import sqlalchemy as sa # noqa: E402 def upgrade(): op.add_column( - "queue_data", sa.Column("volume_name", sa.String(length=100), nullable=True) + "queue_data", + sa.Column("volume_name", sa.String(length=100), nullable=True), ) op.add_column( - "queue_data", sa.Column("instance_name", sa.String(length=100), nullable=True) + "queue_data", + sa.Column("instance_name", sa.String(length=100), nullable=True), ) diff --git a/staffeln/db/sqlalchemy/alembic/versions/2b2b9df199bd_add_reason_column_to_queue_data_table.py b/staffeln/db/sqlalchemy/alembic/versions/2b2b9df199bd_add_reason_column_to_queue_data_table.py index f78c91d..4ebaf9f 100644 --- a/staffeln/db/sqlalchemy/alembic/versions/2b2b9df199bd_add_reason_column_to_queue_data_table.py +++ b/staffeln/db/sqlalchemy/alembic/versions/2b2b9df199bd_add_reason_column_to_queue_data_table.py @@ -7,11 +7,13 @@ """ # revision identifiers, used by Alembic. 
+from __future__ import annotations + revision = "2b2b9df199bd" down_revision = "ebdbed01e9a7" -import sqlalchemy as sa # noqa: E402 from alembic import op # noqa: E402 +import sqlalchemy as sa # noqa: E402 def upgrade(): diff --git a/staffeln/db/sqlalchemy/alembic/versions/5b2e78435231_add_report_timestamp.py b/staffeln/db/sqlalchemy/alembic/versions/5b2e78435231_add_report_timestamp.py index 1abed60..5635fd9 100644 --- a/staffeln/db/sqlalchemy/alembic/versions/5b2e78435231_add_report_timestamp.py +++ b/staffeln/db/sqlalchemy/alembic/versions/5b2e78435231_add_report_timestamp.py @@ -1,6 +1,8 @@ -import sqlalchemy as sa +from __future__ import annotations + from alembic import op from oslo_log import log +import sqlalchemy as sa """add report timestamp @@ -21,7 +23,11 @@ def upgrade(): op.create_table( "report_timestamp", sa.Column( - "id", sa.String(36), primary_key=True, nullable=False, autoincrement=True + "id", + sa.String(36), + primary_key=True, + nullable=False, + autoincrement=True, ), sa.Column("created_at", sa.DateTime), sa.Column("updated_at", sa.DateTime), diff --git a/staffeln/db/sqlalchemy/alembic/versions/ebdbed01e9a7_added_incremental_field.py b/staffeln/db/sqlalchemy/alembic/versions/ebdbed01e9a7_added_incremental_field.py index b2ed161..45cc8a8 100644 --- a/staffeln/db/sqlalchemy/alembic/versions/ebdbed01e9a7_added_incremental_field.py +++ b/staffeln/db/sqlalchemy/alembic/versions/ebdbed01e9a7_added_incremental_field.py @@ -7,15 +7,21 @@ """ # revision identifiers, used by Alembic. +from __future__ import annotations + revision = "ebdbed01e9a7" down_revision = "041d9a0f1159" -import sqlalchemy as sa # noqa: E402 from alembic import op # noqa: E402 +import sqlalchemy as sa # noqa: E402 def upgrade(): # ### commands auto generated by Alembic - please adjust! ### - op.add_column("backup_data", sa.Column("incremental", sa.Boolean(), nullable=True)) - op.add_column("queue_data", sa.Column("incremental", sa.Boolean(), nullable=True)) + op.add_column( + "backup_data", sa.Column("incremental", sa.Boolean(), nullable=True) + ) + op.add_column( + "queue_data", sa.Column("incremental", sa.Boolean(), nullable=True) + ) # ### end Alembic commands ### diff --git a/staffeln/db/sqlalchemy/api.py b/staffeln/db/sqlalchemy/api.py index 3cda5e7..335094e 100644 --- a/staffeln/db/sqlalchemy/api.py +++ b/staffeln/db/sqlalchemy/api.py @@ -1,5 +1,7 @@ """SQLAlchemy storage backend.""" +from __future__ import annotations + import datetime import operator @@ -8,9 +10,12 @@ from oslo_db.sqlalchemy import session as db_session from oslo_db.sqlalchemy import utils as db_utils from oslo_log import log -from oslo_utils import strutils, timeutils, uuidutils +from oslo_utils import strutils +from oslo_utils import timeutils +from oslo_utils import uuidutils from sqlalchemy.inspection import inspect from sqlalchemy.orm import exc + from staffeln.common import short_id from staffeln.db.sqlalchemy import models @@ -54,6 +59,7 @@ def model_query(model, *args, **kwargs): def add_identity_filter(query, value): """Adds an identity filter to a query. + Filters results by ID, if supplied value is a valid integer. Otherwise attempts to filter results by backup_id. :param query: Initial query to add filter to. 
@@ -161,6 +167,7 @@ def _add_queues_filters(self, query, filters): def _add_filters(self, query, model, filters=None, plain_fields=None): """Add filters while listing the columns from database table""" + # timestamp_mixin_fields = ["created_at", "updated_at"] filters = filters or {} @@ -178,9 +185,8 @@ def __add_simple_filter(self, query, model, fieldname, value, operator_): field = getattr(model, fieldname) if ( - fieldname != "deleted" - and value - and field.type.python_type is datetime.datetime + fieldname != "deleted" and value and ( + field.type.python_type is datetime.datetime) ): if not isinstance(value, datetime.datetime): value = timeutils.parse_isotime(value) @@ -323,7 +329,10 @@ def _get_queue(self, context, fieldname, value): try: return self._get( - context, model=models.Queue_data, fieldname=fieldname, value=value + context, + model=models.Queue_data, + fieldname=fieldname, + value=value, ) except: # noqa: E722 LOG.error("Queue not found") @@ -338,7 +347,9 @@ def get_backup_by_backup_id(self, context, backup_id): """Get the column from the backup_data with matching backup_id""" try: - return self._get_backup(context, fieldname="backup_id", value=backup_id) + return self._get_backup( + context, fieldname="backup_id", value=backup_id + ) except: # noqa: E722 LOG.error("Backup not found with backup_id %s." % backup_id) @@ -347,7 +358,10 @@ def _get_backup(self, context, fieldname, value): try: return self._get( - context, model=models.Backup_data, fieldname=fieldname, value=value + context, + model=models.Backup_data, + fieldname=fieldname, + value=value, ) except: # noqa: E722 LOG.error("Backup resource not found.") @@ -365,7 +379,9 @@ def get_report_timestamp_list(self, *args, **kwargs): def create_report_timestamp(self, values): try: - report_timestamp_data = self._create(models.Report_timestamp, values) + report_timestamp_data = self._create( + models.Report_timestamp, values + ) except db_exc.DBDuplicateEntry: LOG.error("Report Timestamp ID already exists.") return report_timestamp_data diff --git a/staffeln/db/sqlalchemy/migration.py b/staffeln/db/sqlalchemy/migration.py index 3a34c2b..e50d757 100644 --- a/staffeln/db/sqlalchemy/migration.py +++ b/staffeln/db/sqlalchemy/migration.py @@ -1,7 +1,10 @@ +from __future__ import annotations + import os -import staffeln.conf from oslo_db.sqlalchemy.migration_cli import manager + +import staffeln.conf from staffeln.db.sqlalchemy import api as sqla_api from staffeln.db.sqlalchemy import models diff --git a/staffeln/db/sqlalchemy/models.py b/staffeln/db/sqlalchemy/models.py index c186ddc..a0e3815 100644 --- a/staffeln/db/sqlalchemy/models.py +++ b/staffeln/db/sqlalchemy/models.py @@ -1,11 +1,15 @@ """ SQLAlchemy models for staffeln service """ + +from __future__ import annotations + import urllib.parse as urlparse from oslo_db.sqlalchemy import models from sqlalchemy import Boolean, Column, Integer, String, UniqueConstraint from sqlalchemy.ext.declarative import declarative_base + from staffeln import conf CONF = conf.CONF @@ -14,7 +18,10 @@ def table_args(): engine_name = urlparse.urlparse(CONF.database.connection).scheme if engine_name == "mysql": - return {"mysql_engine": CONF.database.mysql_engine, "mysql_charset": "utf8"} + return { + "mysql_engine": CONF.database.mysql_engine, + "mysql_charset": "utf8", + } return None diff --git a/staffeln/exception.py b/staffeln/exception.py index e561506..3f8a34e 100644 --- a/staffeln/exception.py +++ b/staffeln/exception.py @@ -13,10 +13,11 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, 
either express or implied. See the # License for the specific language governing permissions and limitations # under the License. - """Staffeln base exception handling.""" +from __future__ import annotations -from typing import Optional, Union # noqa: H301 +from typing import Optional +from typing import Union from oslo_log import log as logging diff --git a/staffeln/i18n.py b/staffeln/i18n.py index 09fe8aa..cbef6e5 100755 --- a/staffeln/i18n.py +++ b/staffeln/i18n.py @@ -2,6 +2,8 @@ See http://docs.openstack.org/developer/oslo.i18n/usage.html . """ +from __future__ import annotations + import oslo_i18n DOMAIN = "staffeln" diff --git a/staffeln/objects/__init__.py b/staffeln/objects/__init__.py index 2af8df0..defa200 100755 --- a/staffeln/objects/__init__.py +++ b/staffeln/objects/__init__.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from .queue import Queue # noqa: F401 from .report import ReportTimestamp # noqa: F401 from .volume import Volume # noqa: F401 diff --git a/staffeln/objects/base.py b/staffeln/objects/base.py index 8dd6f94..60a6fe2 100755 --- a/staffeln/objects/base.py +++ b/staffeln/objects/base.py @@ -1,8 +1,11 @@ """Staffeln common internal object model""" +from __future__ import annotations + from oslo_utils import versionutils from oslo_versionedobjects import base as ovoo_base from oslo_versionedobjects import fields as ovoo_fields + from staffeln import objects remotable_classmethod = ovoo_base.remotable_classmethod @@ -30,7 +33,9 @@ class StaffelnObject(ovoo_base.VersionedObject): OBJ_PROJECT_NAMESPACE = "staffeln" def as_dict(self): - return {k: getattr(self, k) for k in self.fields if self.obj_attr_is_set(k)} + return { + k: getattr(self, k) for k in self.fields if self.obj_attr_is_set(k) + } class StaffelnObjectSerializer(ovoo_base.VersionedObjectSerializer): @@ -48,9 +53,14 @@ class StaffelnPersistentObject(ovoo_base.VersionedObject): object_fields = {} def obj_refresh(self, loaded_object): - fields = (field for field in self.fields if field not in self.object_fields) + fields = ( + field for field in self.fields if field not in self.object_fields + ) for field in fields: - if self.obj_attr_is_set(field) and self[field] != loaded_object[field]: + if ( + self.obj_attr_is_set(field) and ( + self[field] != loaded_object[field]) + ): self[field] = loaded_object[field] @staticmethod diff --git a/staffeln/objects/fields.py b/staffeln/objects/fields.py index 3f6c2a7..95ed59c 100644 --- a/staffeln/objects/fields.py +++ b/staffeln/objects/fields.py @@ -1,4 +1,7 @@ """Utility method for objects""" + +from __future__ import annotations + from oslo_serialization import jsonutils from oslo_versionedobjects import fields diff --git a/staffeln/objects/queue.py b/staffeln/objects/queue.py index db49c21..b80c2d8 100644 --- a/staffeln/objects/queue.py +++ b/staffeln/objects/queue.py @@ -1,4 +1,7 @@ +from __future__ import annotations + from oslo_versionedobjects import fields as ovoo_fields + from staffeln.db import api as db_api from staffeln.objects import base from staffeln.objects import fields as sfeild @@ -6,7 +9,9 @@ @base.StaffelnObjectRegistry.register class Queue( - base.StaffelnPersistentObject, base.StaffelnObject, base.StaffelnObjectDictCompat + base.StaffelnPersistentObject, + base.StaffelnObject, + base.StaffelnObjectDictCompat, ): VERSION = "1.2" # Version 1.0: Initial version @@ -37,6 +42,7 @@ def list(cls, context, filters=None): # pylint: disable=E0213 @base.remotable_classmethod def get_by_id(cls, context, id): # pylint: disable=E0213 """Find a queue 
task based on id + :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first @@ -46,6 +52,7 @@ def get_by_id(cls, context, id): # pylint: disable=E0213 :param backup_id: the backup id of volume in queue. :returns: a :class:`Queue` object. """ + db_queue = cls.dbapi.get_queue_by_id(context, id) queue = cls._from_db_object(cls(context), db_queue) return queue @@ -53,6 +60,7 @@ def get_by_id(cls, context, id): # pylint: disable=E0213 @base.remotable def create(self): """Create a :class:`Backup_data` record in the DB""" + values = self.obj_get_changes() db_queue = self.dbapi.create_queue(values) return self._from_db_object(self, db_queue) @@ -73,4 +81,5 @@ def refresh(self): @base.remotable def delete_queue(self): """Soft Delete the :class:`Queue_data` from the DB""" + self.dbapi.soft_delete_queue(self.id) diff --git a/staffeln/objects/report.py b/staffeln/objects/report.py index e851a93..7bc1885 100644 --- a/staffeln/objects/report.py +++ b/staffeln/objects/report.py @@ -1,4 +1,7 @@ +from __future__ import annotations + from oslo_versionedobjects import fields as ovoo_fields + from staffeln.db import api as db_api from staffeln.objects import base from staffeln.objects import fields as sfeild @@ -6,7 +9,9 @@ @base.StaffelnObjectRegistry.register class ReportTimestamp( - base.StaffelnPersistentObject, base.StaffelnObject, base.StaffelnObjectDictCompat + base.StaffelnPersistentObject, + base.StaffelnObject, + base.StaffelnObjectDictCompat, ): VERSION = "1.0" # Version 1.0: Initial version @@ -21,7 +26,9 @@ class ReportTimestamp( @base.remotable_classmethod def list(cls, context, filters=None): # pylint: disable=E0213 - db_report = cls.dbapi.get_report_timestamp_list(context, filters=filters) + db_report = cls.dbapi.get_report_timestamp_list( + context, filters=filters + ) return [cls._from_db_object(cls(context), obj) for obj in db_report] @base.remotable diff --git a/staffeln/objects/volume.py b/staffeln/objects/volume.py index a4f24f6..88f952a 100644 --- a/staffeln/objects/volume.py +++ b/staffeln/objects/volume.py @@ -1,4 +1,7 @@ +from __future__ import annotations + from oslo_versionedobjects import fields as ovoo_fields + from staffeln.db import api as db_api from staffeln.objects import base from staffeln.objects import fields as sfeild @@ -6,7 +9,9 @@ @base.StaffelnObjectRegistry.register class Volume( - base.StaffelnPersistentObject, base.StaffelnObject, base.StaffelnObjectDictCompat + base.StaffelnPersistentObject, + base.StaffelnObject, + base.StaffelnObjectDictCompat, ): VERSION = "1.1" # Version 1.0: Initial version @@ -31,7 +36,9 @@ def list(cls, context, filters=None, **kwargs): # pylint: disable=E0213 :param filters: dict mapping the filter to a value. """ - db_backups = cls.dbapi.get_backup_list(context, filters=filters, **kwargs) + db_backups = cls.dbapi.get_backup_list( + context, filters=filters, **kwargs + ) return [cls._from_db_object(cls(context), obj) for obj in db_backups] @@ -58,6 +65,7 @@ def save(self): @base.remotable def refresh(self): """Loads updates for this :class:`Backup_data`. + Loads a backup with the same backup_id from the database and checks for updated attributes. Updates are applied from the loaded backup column by column, if there are any updates. 
@@ -71,8 +79,11 @@ def delete_backup(self): self.dbapi.soft_delete_backup(self.id) @base.remotable_classmethod - def get_backup_by_backup_id(cls, context, backup_id): # pylint: disable=E0213 + def get_backup_by_backup_id( + cls, context, backup_id + ): # pylint: disable=E0213 """Find a backup based on backup_id + :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first diff --git a/staffeln/tests/base.py b/staffeln/tests/base.py index 1c30cdb..00059c0 100755 --- a/staffeln/tests/base.py +++ b/staffeln/tests/base.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2010-2011 OpenStack Foundation # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # @@ -14,10 +13,10 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +from __future__ import annotations from oslotest import base class TestCase(base.BaseTestCase): - """Test case base class for all unit tests.""" diff --git a/staffeln/tests/common/test_openstacksdk.py b/staffeln/tests/common/test_openstacksdk.py new file mode 100644 index 0000000..e516a30 --- /dev/null +++ b/staffeln/tests/common/test_openstacksdk.py @@ -0,0 +1,503 @@ +# SPDX-License-Identifier: Apache-2.0 +from __future__ import annotations + +from unittest import mock + +from openstack import exceptions as openstack_exc +import tenacity + +from staffeln.common import openstack as s_openstack +from staffeln import conf +from staffeln.tests import base + + +class OpenstackSDKTest(base.TestCase): + + def setUp(self): + super(OpenstackSDKTest, self).setUp() + self.m_c = mock.MagicMock() + with mock.patch("openstack.connect", return_value=self.m_c): + self.openstack = s_openstack.OpenstackSDK() + self.m_sleep = mock.Mock() + func_list = [ + "get_user_id", + "get_projects", + "get_servers", + "get_role_assignments", + "get_user", + "get_project_member_emails", + "get_volume", + "get_backup", + "delete_backup", + "get_backup_quota", + "get_backup_gigabytes_quota", + ] + for i in func_list: + getattr(self.openstack, i).retry.sleep = ( # pylint: disable=E1101 + self.m_sleep + ) + getattr(self.openstack, i).retry.stop = ( # pylint: disable=E1101 + tenacity.stop_after_attempt(2) + ) + + self.fake_user = mock.MagicMock(id="foo", email="foo@foo.com") + self.fake_volume = mock.MagicMock(id="fake_volume") + self.fake_backup = mock.MagicMock(id="fake_backup") + self.fake_role_assignment = mock.MagicMock(user="foo") + self.fake_role_assignment2 = mock.MagicMock(user={"id": "bar"}) + + def _test_http_error( + self, m_func, retry_func, status_code, call_count=1, **kwargs + ): + m_func.side_effect = openstack_exc.HttpException( + http_status=status_code + ) + exc = self.assertRaises( + openstack_exc.HttpException, + getattr(self.openstack, retry_func), + **kwargs, + ) + self.assertEqual(status_code, exc.status_code) + skip_retry_codes = conf.CONF.openstack.skip_retry_codes.replace( + ' ', '').split(',') + if str(status_code) not in skip_retry_codes: + if call_count == 1: + self.m_sleep.assert_called_once_with(1.0) + else: + self.m_sleep.assert_has_calls( + [mock.call(1.0) for c in range(call_count)] + ) + else: + self.m_sleep.assert_not_called() + + def _test_non_http_error(self, m_func, retry_func, **kwargs): + m_func.side_effect = KeyError + self.assertRaises( + KeyError, getattr(self.openstack, retry_func), **kwargs + ) + self.m_sleep.assert_not_called() + + def 
test_get_servers(self): + self.m_c.compute.servers = mock.MagicMock(return_value=[]) + self.assertEqual(self.openstack.get_servers(), []) + self.m_c.compute.servers.assert_called_once_with( + details=True, all_projects=True + ) + + def test_get_servers_non_http_error(self): + self._test_non_http_error(self.m_c.compute.servers, "get_servers") + + def test_get_servers_conf_skip_http_error(self): + conf.CONF.set_override('skip_retry_codes', '403,', 'openstack') + self._test_http_error( + self.m_c.compute.servers, "get_servers", status_code=403 + ) + self.assertEqual('403,', conf.CONF.openstack.skip_retry_codes) + + def test_get_servers_conf_skip_http_error_not_hit(self): + conf.CONF.set_override('skip_retry_codes', '403,', 'openstack') + self._test_http_error( + self.m_c.compute.servers, "get_servers", status_code=404 + ) + self.assertEqual('403,', conf.CONF.openstack.skip_retry_codes) + + def test_get_servers_404_http_error(self): + self._test_http_error( + self.m_c.compute.servers, "get_servers", status_code=404 + ) + + def test_get_servers_500_http_error(self): + self._test_http_error( + self.m_c.compute.servers, "get_servers", status_code=500 + ) + + def test_get_projects(self): + self.m_c.list_projects = mock.MagicMock(return_value=[]) + self.assertEqual(self.openstack.get_projects(), []) + self.m_c.list_projects.assert_called_once_with() + + def test_get_projects_non_http_error(self): + self._test_non_http_error(self.m_c.list_projects, "get_projects") + + def test_get_projects_404_http_error(self): + self._test_http_error( + self.m_c.list_projects, "get_projects", status_code=404 + ) + + def test_get_projects_500_http_error(self): + self._test_http_error( + self.m_c.list_projects, "get_projects", status_code=500 + ) + + def test_get_user_id(self): + self.m_c.get_user = mock.MagicMock(return_value=self.fake_user) + self.assertEqual(self.openstack.get_user_id(), "foo") + self.m_c.get_user.assert_called_once_with(name_or_id=mock.ANY) + + def test_get_user_id_non_http_error(self): + self._test_non_http_error(self.m_c.get_user, "get_user_id") + + def test_get_user_id_404_http_error(self): + self._test_http_error( + self.m_c.get_user, "get_user_id", status_code=404 + ) + + def test_get_user_id_500_http_error(self): + self._test_http_error( + self.m_c.get_user, "get_user_id", status_code=500 + ) + + def test_get_user(self): + self.m_c.get_user = mock.MagicMock(return_value=self.fake_user) + self.assertEqual( + self.openstack.get_user(user_id=self.fake_user.id), self.fake_user + ) + self.m_c.get_user.assert_called_once_with(name_or_id=self.fake_user.id) + + def test_get_user_non_http_error(self): + self._test_non_http_error( + self.m_c.get_user, "get_user", user_id=self.fake_user.id + ) + + def test_get_user_404_http_error(self): + self._test_http_error( + self.m_c.get_user, + "get_user", + status_code=404, + user_id=self.fake_user.id, + ) + + def test_get_user_500_http_error(self): + self._test_http_error( + self.m_c.get_user, + "get_user", + status_code=500, + user_id=self.fake_user.id, + ) + + def test_get_role_assignments(self): + self.m_c.list_role_assignments = mock.MagicMock(return_value=[]) + self.assertEqual( + self.openstack.get_role_assignments(project_id="foo"), [] + ) + self.m_c.list_role_assignments.assert_called_once_with( + filters={"project": "foo"} + ) + + def test_get_role_assignments_non_http_error(self): + self._test_non_http_error( + self.m_c.list_role_assignments, + "get_role_assignments", + project_id="foo", + ) + + def test_get_role_assignments_404_http_error(self): + 
self._test_http_error( + self.m_c.list_role_assignments, + "get_role_assignments", + status_code=404, + project_id="foo", + ) + + def test_get_role_assignments_500_http_error(self): + self._test_http_error( + self.m_c.list_role_assignments, + "get_role_assignments", + status_code=500, + project_id="foo", + ) + + def test_get_project_member_emails(self): + # Make sure we cover both get_user pattern + self.m_c.list_role_assignments = mock.MagicMock( + return_value=[ + self.fake_role_assignment, + self.fake_role_assignment2, + ] + ) + self.m_c.get_user = mock.MagicMock(return_value=self.fake_user) + self.assertEqual( + self.openstack.get_project_member_emails(project_id="foo"), + [self.fake_user.email, self.fake_user.email], + ) + self.m_c.list_role_assignments.assert_called_once_with( + filters={"project": "foo"} + ) + self.m_c.get_user.assert_has_calls( + [ + mock.call(name_or_id=self.fake_role_assignment.user), + mock.call( + name_or_id=self.fake_role_assignment2.user.get("id") + ), + ] + ) + + def test_get_project_member_emails_non_http_error(self): + self._test_non_http_error( + self.m_c.list_role_assignments, + "get_project_member_emails", + project_id="foo", + ) + + def test_get_project_member_emails_404_http_error(self): + self._test_http_error( + self.m_c.list_role_assignments, + "get_project_member_emails", + status_code=404, + project_id="foo", + ) + + def test_get_project_member_emails_500_http_error(self): + self._test_http_error( + self.m_c.list_role_assignments, + "get_project_member_emails", + status_code=500, + call_count=3, + project_id="foo", + ) + + def test_get_volume(self): + self.m_c.get_volume_by_id = mock.MagicMock( + return_value=self.fake_volume + ) + self.assertEqual( + self.openstack.get_volume( + uuid=self.fake_volume.id, project_id="bar" + ), + self.fake_volume, + ) + self.m_c.get_volume_by_id.assert_called_once_with(self.fake_volume.id) + + def test_get_volume_non_http_error(self): + self._test_non_http_error( + self.m_c.get_volume_by_id, + "get_volume", + uuid="foo", + project_id="bar", + ) + + def test_get_volume_404_http_error(self): + self._test_http_error( + self.m_c.get_volume_by_id, + "get_volume", + status_code=404, + uuid="foo", + project_id="bar", + ) + + def test_get_volume_500_http_error(self): + self._test_http_error( + self.m_c.get_volume_by_id, + "get_volume", + status_code=500, + uuid="foo", + project_id="bar", + ) + + def test_get_backup(self): + self.m_c.get_volume_backup = mock.MagicMock( + return_value=self.fake_backup + ) + self.assertEqual( + self.openstack.get_backup( + uuid=self.fake_backup.id, project_id="bar" + ), + self.fake_backup, + ) + self.m_c.get_volume_backup.assert_called_once_with(self.fake_backup.id) + + def test_get_backup_not_found(self): + self.m_c.get_volume_backup = mock.MagicMock( + side_effect=openstack_exc.ResourceNotFound + ) + self.assertEqual( + self.openstack.get_backup( + uuid=self.fake_backup.id, project_id="bar" + ), + None, + ) + self.m_c.get_volume_backup.assert_called_once_with(self.fake_backup.id) + + def test_get_backup_non_http_error(self): + self._test_non_http_error( + self.m_c.get_volume_backup, + "get_backup", + uuid="foo", + project_id="bar", + ) + + def test_get_backup_404_http_error(self): + self._test_http_error( + self.m_c.get_volume_backup, + "get_backup", + status_code=404, + uuid="foo", + project_id="bar", + ) + + def test_get_backup_500_http_error(self): + self._test_http_error( + self.m_c.get_volume_backup, + "get_backup", + status_code=500, + uuid="foo", + project_id="bar", + ) + + def 
test_delete_backup(self): + self.m_c.delete_volume_backup = mock.MagicMock( + return_value=self.fake_backup + ) + self.assertEqual( + self.openstack.delete_backup( + uuid=self.fake_backup.id, project_id="bar" + ), + None, + ) + self.m_c.delete_volume_backup.assert_called_once_with( + self.fake_backup.id, force=False + ) + + def test_delete_backup_not_found(self): + self.m_c.delete_volume_backup = mock.MagicMock( + side_effect=openstack_exc.ResourceNotFound + ) + self.assertEqual( + self.openstack.delete_backup( + uuid=self.fake_backup.id, project_id="bar" + ), + None, + ) + self.m_c.delete_volume_backup.assert_called_once_with( + self.fake_backup.id, force=False + ) + + def test_delete_backup_non_http_error(self): + self._test_non_http_error( + self.m_c.delete_volume_backup, + "delete_backup", + uuid="foo", + project_id="bar", + ) + + def test_delete_backup_404_http_error(self): + self._test_http_error( + self.m_c.delete_volume_backup, + "delete_backup", + status_code=404, + uuid="foo", + project_id="bar", + ) + + def test_delete_backup_500_http_error(self): + self._test_http_error( + self.m_c.delete_volume_backup, + "delete_backup", + status_code=500, + uuid="foo", + project_id="bar", + ) + + @mock.patch("openstack.proxy._json_response") + def test_get_backup_quota(self, m_j_r): + self.m_c.block_storage.get = mock.MagicMock(status_code=200) + self.m_gam = mock.MagicMock() + self.m_c._get_and_munchify = self.m_gam + self.m_gam.return_value = mock.MagicMock(backups=[self.fake_backup.id]) + self.assertEqual( + [self.fake_backup.id], + self.openstack.get_backup_quota(project_id="bar"), + ) + self.m_c.block_storage.get.assert_called_once_with( + "/os-quota-sets/bar?usage=True" + ) + + def test_get_backup_quota_non_http_error(self): + self._test_non_http_error( + self.m_c.block_storage.get, "get_backup_quota", project_id="bar" + ) + + def test_get_backup_quota_404_http_error(self): + self._test_http_error( + self.m_c.block_storage.get, + "get_backup_quota", + status_code=404, + project_id="bar", + ) + + def test_get_backup_quota_500_http_error(self): + self._test_http_error( + self.m_c.block_storage.get, + "get_backup_quota", + status_code=500, + project_id="bar", + ) + + @mock.patch("openstack.proxy._json_response") + def test_get_backup_gigabytes_quota(self, m_j_r): + self.m_c.block_storage.get = mock.MagicMock(status_code=200) + self.m_gam = mock.MagicMock() + self.m_c._get_and_munchify = self.m_gam + self.m_gam.return_value = mock.MagicMock( + backup_gigabytes=[self.fake_backup.id] + ) + self.assertEqual( + [self.fake_backup.id], + self.openstack.get_backup_gigabytes_quota(project_id="bar"), + ) + self.m_c.block_storage.get.assert_called_once_with( + "/os-quota-sets/bar?usage=True" + ) + + def test_get_backup_gigabytes_quota_non_http_error(self): + self._test_non_http_error( + self.m_c.block_storage.get, + "get_backup_gigabytes_quota", + project_id="bar", + ) + + def test_get_backup_gigabytes_quota_404_http_error(self): + self._test_http_error( + self.m_c.block_storage.get, + "get_backup_gigabytes_quota", + status_code=404, + project_id="bar", + ) + + def test_get_backup_gigabytes_quota_500_http_error(self): + self._test_http_error( + self.m_c.block_storage.get, + "get_backup_gigabytes_quota", + status_code=500, + project_id="bar", + ) + + @mock.patch("openstack.proxy._json_response") + def test_get_volume_quotas(self, m_j_r): + self.m_c.block_storage.get = mock.MagicMock(status_code=200) + self.m_gam_return = mock.MagicMock() + self.m_gam = mock.MagicMock(return_value=self.m_gam_return) + 
self.m_c._get_and_munchify = self.m_gam + self.assertEqual( + self.m_gam_return, + self.openstack._get_volume_quotas(project_id="bar"), + ) + self.m_c.block_storage.get.assert_called_once_with( + "/os-quota-sets/bar?usage=True" + ) + self.m_gam.assert_called_once_with("quota_set", m_j_r()) + + @mock.patch("openstack.proxy._json_response") + def test_get_volume_quotas_no_usage(self, m_j_r): + self.m_c.block_storage.get = mock.MagicMock(status_code=200) + self.m_gam_return = mock.MagicMock() + self.m_gam = mock.MagicMock(return_value=self.m_gam_return) + self.m_c._get_and_munchify = self.m_gam + self.assertEqual( + self.m_gam_return, + self.openstack._get_volume_quotas(project_id="bar", usage=False), + ) + self.m_c.block_storage.get.assert_called_once_with( + "/os-quota-sets/bar" + ) + self.m_gam.assert_called_once_with("quota_set", m_j_r()) diff --git a/staffeln/tests/test_staffeln.py b/staffeln/tests/test_staffeln.py index 6c7c5f3..5ef2a92 100755 --- a/staffeln/tests/test_staffeln.py +++ b/staffeln/tests/test_staffeln.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -11,13 +10,13 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. - """ test_staffeln ---------------------------------- Tests for `staffeln` module. """ +from __future__ import annotations from staffeln.tests import base diff --git a/staffeln/version.py b/staffeln/version.py index efe79df..b943573 100755 --- a/staffeln/version.py +++ b/staffeln/version.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import pbr.version version_info = pbr.version.VersionInfo("staffeln") diff --git a/tox.ini b/tox.ini index 4812539..bdbf5da 100755 --- a/tox.ini +++ b/tox.ini @@ -27,7 +27,7 @@ deps = -r{toxinidir}/test-requirements.txt commands = stestr run --slowest {posargs} [testenv:pep8] -commands = +commands = flake8 [testenv:cover] From 51a0a3083032f93d7661310eec3824cdcb60ab83 Mon Sep 17 00:00:00 2001 From: ricolin Date: Fri, 1 Nov 2024 20:46:49 +0800 Subject: [PATCH 3/8] Add unit test CI --- .github/workflows/linters.yaml | 27 ++++++++++++++++++++++++++- .github/workflows/unittests.yaml | 29 +++++++++++++++++++++++++++++ staffeln/common/time.py | 2 +- staffeln/db/sqlalchemy/api.py | 18 +++++++++--------- 4 files changed, 65 insertions(+), 11 deletions(-) create mode 100644 .github/workflows/unittests.yaml diff --git a/.github/workflows/linters.yaml b/.github/workflows/linters.yaml index 6745ffe..0de0e33 100644 --- a/.github/workflows/linters.yaml +++ b/.github/workflows/linters.yaml @@ -1,5 +1,15 @@ name: linters -on: push + +on: + workflow_dispatch: + push: + branches: + - 'main' + tags: + - 'v*' + pull_request: + branches: + - 'main' jobs: super-lint: @@ -14,3 +24,18 @@ jobs: VALIDATE_DOCKERFILE_HADOLINT: false VALIDATE_PYTHON_MYPY: false VALIDATE_JSCPD: false + pep8: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Setup Python + uses: actions/setup-python@v4 + + - name: Install dependencies + run: | + sudo apt-get update + sudo apt-get install -y tox + + - name: Run tox -e pep8 + run: tox -e pep8 diff --git a/.github/workflows/unittests.yaml b/.github/workflows/unittests.yaml new file mode 100644 index 0000000..946d2fc --- /dev/null +++ b/.github/workflows/unittests.yaml @@ -0,0 +1,29 @@ +name: unittests + +on:
+ workflow_dispatch: + push: + branches: + - 'main' + tags: + - 'v*' + pull_request: + branches: + - 'main' + +jobs: + unittest: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Setup Python + uses: actions/setup-python@v4 + + - name: Install dependencies + run: | + sudo apt-get update + sudo apt-get install -y tox + + - name: Run tox -e py3 + run: tox -e py3 diff --git a/staffeln/common/time.py b/staffeln/common/time.py index 79ef345..08fd5d1 100644 --- a/staffeln/common/time.py +++ b/staffeln/common/time.py @@ -35,7 +35,7 @@ def parse_timedelta_string(time_str): if empty_flag: return None return time_params - except: # noqa: E722 + except Exception: # noqa: E722 return None diff --git a/staffeln/db/sqlalchemy/api.py b/staffeln/db/sqlalchemy/api.py index 335094e..4919f28 100644 --- a/staffeln/db/sqlalchemy/api.py +++ b/staffeln/db/sqlalchemy/api.py @@ -294,7 +294,7 @@ def update_backup(self, backup_id, values): try: return self._update(models.Backup_data, backup_id, values) - except: # noqa: E722 + except Exception: # noqa: E722 LOG.error("backup resource not found.") def create_queue(self, values): @@ -316,7 +316,7 @@ def update_queue(self, id, values): try: return self._update(models.Queue_data, id, values) - except: # noqa: E722 + except Exception: # noqa: E722 LOG.error("Queue resource not found.") def get_queue_by_id(self, context, id): @@ -334,13 +334,13 @@ def _get_queue(self, context, fieldname, value): fieldname=fieldname, value=value, ) - except: # noqa: E722 + except Exception: # noqa: E722 LOG.error("Queue not found") def soft_delete_queue(self, id): try: return self._soft_delete(models.Queue_data, id) - except: # noqa: E722 + except Exception: # noqa: E722 LOG.error("Queue Not found.") def get_backup_by_backup_id(self, context, backup_id): @@ -350,7 +350,7 @@ def get_backup_by_backup_id(self, context, backup_id): return self._get_backup( context, fieldname="backup_id", value=backup_id ) - except: # noqa: E722 + except Exception: # noqa: E722 LOG.error("Backup not found with backup_id %s."
% backup_id) def _get_backup(self, context, fieldname, value): @@ -363,13 +363,13 @@ def _get_backup(self, context, fieldname, value): fieldname=fieldname, value=value, ) - except: # noqa: E722 + except Exception: # noqa: E722 LOG.error("Backup resource not found.") def soft_delete_backup(self, id): try: return self._soft_delete(models.Backup_data, id) - except: # noqa: E722 + except Exception: # noqa: E722 LOG.error("Backup Not found.") def get_report_timestamp_list(self, *args, **kwargs): @@ -390,11 +390,11 @@ def update_report_timestamp(self, id, values): try: return self._update(models.Report_timestamp, id, values) - except: # noqa: E722 + except Exception: # noqa: E722 LOG.error("Report Timestamp resource not found.") def soft_delete_report_timestamp(self, id): try: return self._soft_delete(models.Report_timestamp, id) - except: # noqa: E722 + except Exception: # noqa: E722 LOG.error("Report Timestamp Not found.") From 7a5582f890b861c29e006c2fada5c29a0a31dac2 Mon Sep 17 00:00:00 2001 From: ricolin Date: Fri, 1 Nov 2024 21:11:14 +0800 Subject: [PATCH 4/8] fix superlint --- .github/workflows/linters.yaml | 2 ++ staffeln/common/openstack.py | 1 - staffeln/conductor/backup.py | 4 ++-- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/linters.yaml b/.github/workflows/linters.yaml index 0de0e33..2018b3a 100644 --- a/.github/workflows/linters.yaml +++ b/.github/workflows/linters.yaml @@ -22,6 +22,8 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} VALIDATE_ALL_CODEBASE: true VALIDATE_DOCKERFILE_HADOLINT: false + VALIDATE_PYTHON_ISORT: false + VALIDATE_PYTHON_BLACK: false VALIDATE_PYTHON_MYPY: false VALIDATE_JSCPD: false pep8: diff --git a/staffeln/common/openstack.py b/staffeln/common/openstack.py index ea566e3..e9f730c 100644 --- a/staffeln/common/openstack.py +++ b/staffeln/common/openstack.py @@ -3,7 +3,6 @@ from openstack import exceptions from openstack import proxy from oslo_log import log -import tenacity from staffeln.common import auth from staffeln.i18n import _ diff --git a/staffeln/conductor/backup.py b/staffeln/conductor/backup.py index 559d782..5319035 100755 --- a/staffeln/conductor/backup.py +++ b/staffeln/conductor/backup.py @@ -675,8 +675,8 @@ def process_failed_backup(self, task): except OpenstackHttpException as ex: LOG.warn( _( - "Failed to delete volume backup %s. %s. Need to delete manually." - % (task.backup_id, str(ex)) + "Failed to delete volume backup %s. %s. Need " + "to delete manually." 
% (task.backup_id, str(ex)) ) ) task.reason = reason From aca56c701f2d2f2e4ee150f0979116f4ece811e3 Mon Sep 17 00:00:00 2001 From: ricolin Date: Wed, 13 Nov 2024 14:36:13 +0800 Subject: [PATCH 5/8] Move UT and linters jobs to Zuul --- .github/workflows/linters.yaml | 43 -------------------------------- .github/workflows/unittests.yaml | 29 --------------------- tox.ini | 14 ++++++----- zuul.d/jobs.yaml | 9 +++++++ zuul.d/project.yaml | 5 ++++ 5 files changed, 22 insertions(+), 78 deletions(-) delete mode 100644 .github/workflows/linters.yaml delete mode 100644 .github/workflows/unittests.yaml create mode 100644 zuul.d/jobs.yaml create mode 100644 zuul.d/project.yaml diff --git a/.github/workflows/linters.yaml b/.github/workflows/linters.yaml deleted file mode 100644 index 2018b3a..0000000 --- a/.github/workflows/linters.yaml +++ /dev/null @@ -1,43 +0,0 @@ -name: linters - -on: - workflow_dispatch: - push: - branches: - - 'main' - tags: - - 'v*' - pull_request: - branches: - - 'main' - -jobs: - super-lint: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: github/super-linter@v4 - env: - DEFAULT_BRANCH: main - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - VALIDATE_ALL_CODEBASE: true - VALIDATE_DOCKERFILE_HADOLINT: false - VALIDATE_PYTHON_ISORT: false - VALIDATE_PYTHON_BLACK: false - VALIDATE_PYTHON_MYPY: false - VALIDATE_JSCPD: false - pep8: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - - name: Setup Python - uses: actions/setup-python@v4 - - - name: Install dependencies - run: | - sudo apt-get update - sudo apt-get install -y tox - - - name: Run tox -e pep8 - run: tox -e pep8 diff --git a/.github/workflows/unittests.yaml b/.github/workflows/unittests.yaml deleted file mode 100644 index 946d2fc..0000000 --- a/.github/workflows/unittests.yaml +++ /dev/null @@ -1,29 +0,0 @@ -name: unittests - -on: - workflow_dispatch: - push: - branches: - - 'main' - tags: - - 'v*' - pull_request: - branches: - - 'main' - -jobs: - unittest: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - - name: Setup Python - uses: actions/setup-python@v4 - - - name: Install dependencies - run: | - sudo apt-get update - sudo apt-get install -y tox - - - name: Run tox -e py3 - run: tox -e py3 diff --git a/tox.ini b/tox.ini index bdbf5da..7bf9cf3 100755 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py37,pep8 +envlist = py3,linters skipsdist = True sitepackages = False skip_missing_interpreters = True @@ -21,15 +21,11 @@ install_commands = pip install {opts} {packages} -[testenv:py3] +[testenv:{py3,py38,py39,py310}] basepython = python3 deps = -r{toxinidir}/test-requirements.txt commands = stestr run --slowest {posargs} -[testenv:pep8] -commands = - flake8 - [testenv:cover] basepython = python3 deps = -r{toxinidir}/requirements.txt @@ -45,6 +41,12 @@ commands = coverage xml -o cover/coverage.xml coverage report +[testenv:linters] +skipsdist = True +deps = + pre-commit +commands = + pre-commit run --all-files --show-diff-on-failure [testenv:venv] commands = {posargs} diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml new file mode 100644 index 0000000..d7e620d --- /dev/null +++ b/zuul.d/jobs.yaml @@ -0,0 +1,9 @@ +- job: + name: staffeln-linters + parent: tox-linters + +- job: + name: staffeln-unit + parent: tox + vars: + tox_envlist: py3 diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml new file mode 100644 index 0000000..065d68e --- /dev/null +++ b/zuul.d/project.yaml @@ -0,0 +1,5 @@ +- project: + check: + jobs: + - staffeln-linters + -
staffeln-unit From d6d582ba6a8ec0f76d9498983d7b90750e9f5f00 Mon Sep 17 00:00:00 2001 From: ricolin Date: Thu, 5 Dec 2024 10:11:27 +0800 Subject: [PATCH 6/8] update pre-commit --- .flake8 | 2 ++ .pre-commit-config.yaml | 29 ++++++++++++++++++++++++----- requirements.txt | 1 - test-requirements.txt | 1 + tox.ini | 9 --------- 5 files changed, 27 insertions(+), 15 deletions(-) create mode 100644 .flake8 diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000..6deafc2 --- /dev/null +++ b/.flake8 @@ -0,0 +1,2 @@ +[flake8] +max-line-length = 120 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1dbe818..4975271 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,11 +1,30 @@ repos: + - repo: https://github.com/compilerla/conventional-pre-commit + rev: v2.0.0 + hooks: + - id: conventional-pre-commit + stages: + - commit-msg + + - repo: https://github.com/psf/black + rev: 24.8.0 + hooks: + - id: black + + - repo: https://github.com/pycqa/flake8 + rev: 5.0.4 + hooks: + - id: flake8 + + - repo: https://github.com/pycqa/isort + rev: 5.13.2 + hooks: + - id: isort + - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.3.0 hooks: + - id: check-yaml + args: [--allow-multiple-documents] - id: end-of-file-fixer - id: trailing-whitespace - - repo: https://github.com/pycqa/flake8 - rev: 7.0.0 - hooks: - - id: flake8 - args: [--max-line-length=79] diff --git a/requirements.txt b/requirements.txt index 2372ce2..140798f 100755 --- a/requirements.txt +++ b/requirements.txt @@ -20,4 +20,3 @@ parse tooz # Apache-2.0 sherlock>=0.4.1 # MIT kubernetes # Apache-2.0 -pre-commit diff --git a/test-requirements.txt b/test-requirements.txt index f09cf7f..2c28785 100755 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -9,3 +9,4 @@ python-subunit>=0.0.18 # Apache-2.0/BSD oslotest>=1.10.0 # Apache-2.0 stestr>=1.0.0 # Apache-2.0 testtools>=1.4.0 # MIT +pre-commit diff --git a/tox.ini b/tox.ini index 7bf9cf3..2de6fda 100755 --- a/tox.ini +++ b/tox.ini @@ -12,7 +12,6 @@ setenv = TERM=linux deps = - flake8 -r{toxinidir}/test-requirements.txt -r{toxinidir}/requirements.txt -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} @@ -50,11 +49,3 @@ commands = [testenv:venv] commands = {posargs} - -[flake8] -# E123, E125 skipped as they are invalid PEP-8.
- -show-source = True -ignore = E123,E125 -builtins = _ -exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build From 916cf01794af01f8ec1bc5baca360b99767299ed Mon Sep 17 00:00:00 2001 From: ricolin Date: Thu, 5 Dec 2024 10:30:16 +0800 Subject: [PATCH 7/8] reformat for black --- doc/source/conf.py | 4 +- staffeln/api/app.py | 6 +- staffeln/api/middleware/parsable_error.py | 5 +- staffeln/cmd/api.py | 2 +- staffeln/cmd/conductor.py | 2 +- staffeln/cmd/dbmanage.py | 10 +- staffeln/common/email.py | 4 +- staffeln/common/lock.py | 7 +- staffeln/common/openstack.py | 27 ++--- staffeln/common/service.py | 2 +- staffeln/conductor/backup.py | 94 ++++++--------- staffeln/conductor/manager.py | 54 +++------ staffeln/conductor/result.py | 50 ++++---- staffeln/conf/__init__.py | 6 +- staffeln/conf/api.py | 4 +- staffeln/conf/conductor.py | 12 +- staffeln/conf/database.py | 4 +- staffeln/conf/notify.py | 4 +- staffeln/conf/paths.py | 4 +- staffeln/db/api.py | 4 +- staffeln/db/sqlalchemy/alembic/env.py | 4 +- .../versions/041d9a0f1159_backup_add_names.py | 2 +- ...d_add_reason_column_to_queue_data_table.py | 2 +- .../5b2e78435231_add_report_timestamp.py | 2 +- .../ebdbed01e9a7_added_incremental_field.py | 10 +- staffeln/db/sqlalchemy/api.py | 17 +-- staffeln/exception.py | 3 +- staffeln/objects/base.py | 13 +-- staffeln/objects/report.py | 4 +- staffeln/objects/volume.py | 8 +- staffeln/tests/common/test_openstacksdk.py | 109 +++++------------- 31 files changed, 164 insertions(+), 315 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 7a12515..9055a03 100755 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -40,8 +40,8 @@ # openstackdocstheme options openstackdocs_repo_name = "openstack/staffeln" openstackdocs_bug_project = ( - "replace with the name of the project on " - "Launchpad or the ID from Storyboard") + "replace with the name of the project on " "Launchpad or the ID from Storyboard" +) openstackdocs_bug_tag = "" # If true, '()' will be appended to :func: etc. cross-reference text.
diff --git a/staffeln/api/app.py b/staffeln/api/app.py index b19b9b8..8b54dad 100755 --- a/staffeln/api/app.py +++ b/staffeln/api/app.py @@ -1,12 +1,10 @@ from __future__ import annotations -from flask import Flask -from flask import request -from flask import Response +from flask import Flask, Response, request from oslo_log import log -from staffeln.common import context from staffeln import objects +from staffeln.common import context ctx = context.make_context() app = Flask(__name__) diff --git a/staffeln/api/middleware/parsable_error.py b/staffeln/api/middleware/parsable_error.py index 2443201..343c8c2 100755 --- a/staffeln/api/middleware/parsable_error.py +++ b/staffeln/api/middleware/parsable_error.py @@ -80,10 +80,7 @@ def replacement_start_response(status, headers, exc_info=None): state["status_code"] = status_code except (ValueError, TypeError): # pragma: nocover raise Exception( - _( - "ErrorDocumentMiddleware received an invalid " - "status %s" - ) + _("ErrorDocumentMiddleware received an invalid " "status %s") % status ) else: diff --git a/staffeln/cmd/api.py b/staffeln/cmd/api.py index ba2ce09..a175089 100755 --- a/staffeln/cmd/api.py +++ b/staffeln/cmd/api.py @@ -7,9 +7,9 @@ from oslo_log import log as logging +import staffeln.conf from staffeln.api import app as api_app from staffeln.common import service -import staffeln.conf from staffeln.i18n import _ CONF = staffeln.conf.CONF diff --git a/staffeln/cmd/conductor.py b/staffeln/cmd/conductor.py index 3b2b84e..eaec3cf 100755 --- a/staffeln/cmd/conductor.py +++ b/staffeln/cmd/conductor.py @@ -5,9 +5,9 @@ import cotyledon from cotyledon import oslo_config_glue +import staffeln.conf from staffeln.common import service from staffeln.conductor import manager -import staffeln.conf CONF = staffeln.conf.CONF diff --git a/staffeln/cmd/dbmanage.py b/staffeln/cmd/dbmanage.py index 433b7d7..bd6f01d 100644 --- a/staffeln/cmd/dbmanage.py +++ b/staffeln/cmd/dbmanage.py @@ -8,8 +8,8 @@ from oslo_config import cfg -from staffeln.common import service from staffeln import conf +from staffeln.common import service from staffeln.db import migration CONF = conf.CONF @@ -27,14 +27,10 @@ def do_upgrade(): def add_command_parsers(subparsers): - parser = subparsers.add_parser( - "create_schema", help="Create the database schema." - ) + parser = subparsers.add_parser("create_schema", help="Create the database schema.") parser.set_defaults(func=DBCommand.create_schema) - parser = subparsers.add_parser( - "upgrade", help="Upgrade the database schema." 
- ) + parser = subparsers.add_parser("upgrade", help="Upgrade the database schema.") parser.add_argument("revision", nargs="?") parser.set_defaults(func=DBCommand.do_upgrade) diff --git a/staffeln/common/email.py b/staffeln/common/email.py index 79d7225..368028b 100644 --- a/staffeln/common/email.py +++ b/staffeln/common/email.py @@ -2,11 +2,11 @@ from __future__ import annotations +import smtplib +from email import utils from email.header import Header from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText -from email import utils -import smtplib from oslo_log import log diff --git a/staffeln/common/lock.py b/staffeln/common/lock.py index 5f21bf6..9f7db41 100644 --- a/staffeln/common/lock.py +++ b/staffeln/common/lock.py @@ -5,15 +5,14 @@ import os import re import sys -from typing import Optional # noqa: H301 import uuid +from typing import Optional # noqa: H301 -from oslo_log import log import sherlock +from oslo_log import log from tooz import coordination -from staffeln import conf -from staffeln import exception +from staffeln import conf, exception CONF = conf.CONF LOG = log.getLogger(__name__) diff --git a/staffeln/common/openstack.py b/staffeln/common/openstack.py index e9f730c..710d112 100644 --- a/staffeln/common/openstack.py +++ b/staffeln/common/openstack.py @@ -1,7 +1,6 @@ from __future__ import annotations -from openstack import exceptions -from openstack import proxy +from openstack import exceptions, proxy from oslo_log import log from staffeln.common import auth @@ -20,9 +19,7 @@ def set_project(self, project): project_id = project.get("id") if project_id not in self.conn_list: - LOG.debug( - _("Initiate connection for project %s" % project.get("name")) - ) + LOG.debug(_("Initiate connection for project %s" % project.get("name"))) conn = self.conn.connect_as_project(project) self.conn_list[project_id] = conn LOG.debug(_("Connect as project %s" % project.get("name"))) @@ -33,14 +30,10 @@ def get_user_id(self): user_name = self.conn.config.auth["username"] if "user_domain_id" in self.conn.config.auth: domain_id = self.conn.config.auth["user_domain_id"] - user = self.conn.get_user( - name_or_id=user_name, domain_id=domain_id - ) + user = self.conn.get_user(name_or_id=user_name, domain_id=domain_id) elif "user_domain_name" in self.conn.config.auth: domain_name = self.conn.config.auth["user_domain_name"] - user = self.conn.get_user( - name_or_id=user_name, domain_id=domain_name - ) + user = self.conn.get_user(name_or_id=user_name, domain_id=domain_name) else: user = self.conn.get_user(name_or_id=user_name) return user.id @@ -81,9 +74,7 @@ def get_servers(self, project_id=None, all_projects=True, details=True): project_id=project_id, ) else: - return self.conn.compute.servers( - details=details, all_projects=all_projects - ) + return self.conn.compute.servers(details=details, all_projects=all_projects) def get_volume(self, uuid, project_id): return self.conn.get_volume_by_id(uuid) @@ -149,15 +140,11 @@ def _get_volume_quotas(self, project_id, usage=True): if usage: resp = self.conn.block_storage.get( - "/os-quota-sets/{project_id}?usage=True".format( - project_id=project_id - ) + "/os-quota-sets/{project_id}?usage=True".format(project_id=project_id) ) else: resp = self.conn.block_storage.get( "/os-quota-sets/{project_id}".format(project_id=project_id) ) - data = proxy._json_response( - resp, error_message="cinder client call failed" - ) + data = proxy._json_response(resp, error_message="cinder client call failed") return 
self.conn._get_and_munchify("quota_set", data) diff --git a/staffeln/common/service.py b/staffeln/common/service.py index c657896..98399f6 100755 --- a/staffeln/common/service.py +++ b/staffeln/common/service.py @@ -15,9 +15,9 @@ from oslo_log import log as logging -from staffeln.common import config import staffeln.conf from staffeln import objects +from staffeln.common import config CONF = staffeln.conf.CONF diff --git a/staffeln/conductor/backup.py b/staffeln/conductor/backup.py index 5319035..760132c 100755 --- a/staffeln/conductor/backup.py +++ b/staffeln/conductor/backup.py @@ -1,8 +1,7 @@ from __future__ import annotations import collections -from datetime import timedelta -from datetime import timezone +from datetime import timedelta, timezone from openstack.exceptions import HttpException as OpenstackHttpException from openstack.exceptions import ResourceNotFound as OpenstackResourceNotFound @@ -10,14 +9,12 @@ from oslo_log import log from oslo_utils import timeutils -from staffeln.common import constants -from staffeln.common import context -from staffeln.common import openstack +import staffeln.conf +from staffeln import objects +from staffeln.common import constants, context, openstack from staffeln.common import time as xtime from staffeln.conductor import result -import staffeln.conf from staffeln.i18n import _ -from staffeln import objects CONF = staffeln.conf.CONF LOG = log.getLogger(__name__) @@ -148,8 +145,8 @@ def filter_by_server_metadata(self, metadata): return False return ( - metadata[CONF.conductor.backup_metadata_key].lower( - ) == constants.BACKUP_ENABLED_KEY + metadata[CONF.conductor.backup_metadata_key].lower() + == constants.BACKUP_ENABLED_KEY ) else: return True @@ -164,8 +161,8 @@ def filter_by_volume_status(self, volume_id, project_id): if not res: reason = _( "Volume %s is not triger new backup task because " - "it is in %s status" % - (volume_id, volume["status"])) + "it is in %s status" % (volume_id, volume["status"]) + ) LOG.info(reason) return reason return res @@ -279,9 +276,7 @@ def soft_remove_backup_task(self, backup_object): ) except OpenstackSDKException as e: - LOG.warn( - f"Backup {backup_object.backup_id} deletion failed. {str(e)}" - ) + LOG.warn(f"Backup {backup_object.backup_id} deletion failed. {str(e)}") # We don't delete backup object if any exception occured # backup_object.delete_backup() return False @@ -296,7 +291,8 @@ def hard_remove_volume_backup(self, backup_object, skip_inc_err=False): f"{backup_object.backup_id} is not existing in " "Openstack. Please check your access right to this " "project. " - "Skip this backup from remove now and will retry later.") + "Skip this backup from remove now and will retry later." + ) # Don't remove backup object, keep it and retry on next # periodic task backup_object.delete_backup() return @@ -309,17 +305,15 @@ def hard_remove_volume_backup(self, backup_object, skip_inc_err=False): LOG.info( f"Backup {backup_object.backup_id} is removed from " "Openstack or cinder-backup is not existing in the " - "cloud. Start removing backup object from Staffeln.") + "cloud. Start removing backup object from Staffeln." 
+ ) return backup_object.delete_backup() self.openstacksdk.delete_backup(uuid=backup_object.backup_id) # Don't remove backup until it's officially removed from Cinder # backup_object.delete_backup() except Exception as e: - if ( - skip_inc_err and ( - "Incremental backups exist for this backup" in str(e)) - ): + if skip_inc_err and ("Incremental backups exist for this backup" in str(e)): LOG.debug(str(e)) else: LOG.info( @@ -355,15 +349,11 @@ def _is_backup_required(self, volume_id): # Ignore backup interval return True interval = CONF.conductor.backup_min_interval - threshold_strtime = timeutils.utcnow() - timedelta( - seconds=interval - ) + threshold_strtime = timeutils.utcnow() - timedelta(seconds=interval) backups = self.get_backups( filters={ "volume_id__eq": volume_id, - "created_at__gt": threshold_strtime.astimezone( - timezone.utc - ), + "created_at__gt": threshold_strtime.astimezone(timezone.utc), } ) if backups: @@ -407,8 +397,7 @@ def _is_incremental(self, volume_id): return True except Exception as e: LOG.debug( - "Failed to get backup history to decide backup " - f"method. Reason: {e}" + "Failed to get backup history to decide backup " f"method. Reason: {e}" ) return False @@ -513,14 +502,16 @@ def collect_instance_retention_map(self): LOG.debug( f"Found retention time ({server_retention_time}) " f"defined for server {server.id}, " - "Adding it retention reference map.") + "Adding it retention reference map." + ) retention_map[server.id] = server_retention_time else: LOG.info( f"Server retention time for instance {server.id} is " "incorrect. Please follow " "'ymwd" - "hmins' format.") + "hmins' format." + ) return retention_map def _volume_queue(self, task): @@ -575,10 +566,7 @@ def create_volume_backup(self, task): # NOTE(Alex): no need to wait because we have a cycle time out if project_id not in self.project_list: LOG.warn( - _( - "Project ID %s is not existing in project list" - % project_id - ) + _("Project ID %s is not existing in project list" % project_id) ) self.process_non_existing_backup(task) return @@ -586,10 +574,7 @@ def create_volume_backup(self, task): backup_method = "Incremental" if task.incremental else "Full" LOG.info( _( - ( - "%s Backup (name: %s) for volume %s creating " - "in project %s" - ) + ("%s Backup (name: %s) for volume %s creating " "in project %s") % ( backup_method, backup_name, @@ -608,26 +593,23 @@ def create_volume_backup(self, task): task.backup_status = constants.BACKUP_WIP task.save() except OpenstackSDKException as error: - inc_err_msg = ( - "No backups available to do an incremental backup" - ) + inc_err_msg = "No backups available to do an incremental backup" if inc_err_msg in str(error): LOG.info( "Retry to create full backup for volume %s instead of " - "incremental." % - task.volume_id) + "incremental." % task.volume_id + ) task.incremental = False task.save() else: reason = _( "Backup (name: %s) creation for the volume %s " - "failled. %s" % - (backup_name, task.volume_id, str(error)[ - :64])) + "failled. %s" % (backup_name, task.volume_id, str(error)[:64]) + ) LOG.warn( "Backup (name: %s) creation for the volume %s " - "failled. %s" % - (backup_name, task.volume_id, str(error))) + "failled. %s" % (backup_name, task.volume_id, str(error)) + ) task.reason = reason task.backup_status = constants.BACKUP_FAILED task.save() @@ -654,8 +636,7 @@ def create_volume_backup(self, task): def process_pre_failed_backup(self, task): # 1.notify via email reason = _( - "The backup creation for the volume %s was prefailed." 
- % task.volume_id + "The backup creation for the volume %s was prefailed." % task.volume_id ) LOG.warn(reason) task.reason = reason @@ -664,9 +645,7 @@ def process_pre_failed_backup(self, task): def process_failed_backup(self, task): # 1. notify via email - reason = ( - f"The status of backup for the volume {task.volume_id} is error." - ) + reason = f"The status of backup for the volume {task.volume_id} is error." LOG.warn(reason) # 2. delete backup generator try: @@ -729,10 +708,7 @@ def check_volume_backup_status(self, queue): if backup_gen is None: # TODO(Alex): need to check when it is none LOG.info( - _( - "[Beta] Backup status of %s is returning none." - % (queue.backup_id) - ) + _("[Beta] Backup status of %s is returning none." % (queue.backup_id)) ) self.process_non_existing_backup(queue) return @@ -741,9 +717,7 @@ def check_volume_backup_status(self, queue): elif backup_gen.status == "available": self.process_available_backup(queue) elif backup_gen.status == "creating": - LOG.info( - "Waiting for backup of %s to be completed" % queue.volume_id - ) + LOG.info("Waiting for backup of %s to be completed" % queue.volume_id) else: # "deleting", "restoring", "error_restoring" status self.process_using_backup(queue) diff --git a/staffeln/conductor/manager.py b/staffeln/conductor/manager.py index 1d96d52..c43e13b 100755 --- a/staffeln/conductor/manager.py +++ b/staffeln/conductor/manager.py @@ -1,23 +1,20 @@ from __future__ import annotations -from datetime import timedelta -from datetime import timezone import threading import time +from datetime import timedelta, timezone import cotyledon from futurist import periodics from oslo_log import log from oslo_utils import timeutils -from staffeln.common import constants -from staffeln.common import context -from staffeln.common import lock +import staffeln.conf +from staffeln import objects +from staffeln.common import constants, context, lock from staffeln.common import time as xtime from staffeln.conductor import backup as backup_controller -import staffeln.conf from staffeln.i18n import _ -from staffeln import objects LOG = log.getLogger(__name__) CONF = staffeln.conf.CONF @@ -125,9 +122,7 @@ def _process_todo_tasks(self): ) as t_lock: if t_lock.acquired: # Re-pulling status and make it's up-to-date - task = self.controller.get_queue_task_by_id( - task_id=task.id - ) + task = self.controller.get_queue_task_by_id(task_id=task.id) if task.backup_status == constants.BACKUP_PLANNED: task.backup_status = constants.BACKUP_INIT task.save() @@ -146,13 +141,9 @@ def _update_task_queue(self): def _report_backup_result(self): report_period = CONF.conductor.report_period - threshold_strtime = timeutils.utcnow() - timedelta( - seconds=report_period - ) + threshold_strtime = timeutils.utcnow() - timedelta(seconds=report_period) - filters = { - "created_at__gt": threshold_strtime.astimezone(timezone.utc) - } + filters = {"created_at__gt": threshold_strtime.astimezone(timezone.utc)} report_tss = objects.ReportTimestamp.list( # pylint: disable=E1120 context=self.ctx, filters=filters ) @@ -166,13 +157,9 @@ def _report_backup_result(self): threshold_strtime = timeutils.utcnow() - timedelta( seconds=report_period * 10 ) - filters = { - "created_at__lt": threshold_strtime.astimezone(timezone.utc) - } - old_report_tss = ( - objects.ReportTimestamp.list( # pylint: disable=E1120 - context=self.ctx, filters=filters - ) + filters = {"created_at__lt": threshold_strtime.astimezone(timezone.utc)} + old_report_tss = objects.ReportTimestamp.list( # pylint: disable=E1120 
+            context=self.ctx, filters=filters
         )
         for report_ts in old_report_tss:
             report_ts.delete()
@@ -181,9 +168,7 @@ def backup_engine(self, backup_service_period):
         LOG.info("Backup manager started %s" % str(time.time()))
         LOG.info("%s periodics" % self.name)

-        @periodics.periodic(
-            spacing=backup_service_period, run_immediately=True
-        )
+        @periodics.periodic(spacing=backup_service_period, run_immediately=True)
         def backup_tasks():
             with self.lock_mgt:
                 with lock.Lock(self.lock_mgt, constants.PULLER) as puller:
@@ -269,14 +254,10 @@ def is_retention(self, backup):
     def rotation_engine(self, retention_service_period):
         LOG.info(f"{self.name} rotation_engine")

-        @periodics.periodic(
-            spacing=retention_service_period, run_immediately=True
-        )
+        @periodics.periodic(spacing=retention_service_period, run_immediately=True)
         def rotation_tasks():
             with self.lock_mgt:
-                with lock.Lock(
-                    self.lock_mgt, constants.RETENTION
-                ) as retention:
+                with lock.Lock(self.lock_mgt, constants.RETENTION) as retention:
                     if not retention.acquired:
                         return

@@ -291,9 +272,8 @@ def rotation_tasks():
                     )

                     # No way to judge retention
-                    if (
-                        self.threshold_strtime is None and (
-                            not self.instance_retention_map)
+                    if self.threshold_strtime is None and (
+                        not self.instance_retention_map
                     ):
                         return
                     backup_instance_map = {}
@@ -309,9 +289,7 @@ def rotation_tasks():
                         # after we enable incremental backup.
                         # So we need to have information to judge on.
                         if backup.instance_id in backup_instance_map:
-                            backup_instance_map[backup.instance_id].append(
-                                backup
-                            )
+                            backup_instance_map[backup.instance_id].append(backup)
                         else:
                             backup_instance_map[backup.instance_id] = [backup]

diff --git a/staffeln/conductor/result.py b/staffeln/conductor/result.py
index c602ae3..6bc24f5 100644
--- a/staffeln/conductor/result.py
+++ b/staffeln/conductor/result.py
@@ -5,11 +5,10 @@
 from oslo_log import log
 from oslo_utils import timeutils

-from staffeln.common import constants
-from staffeln.common import email
-from staffeln.common import time as xtime
 import staffeln.conf
 from staffeln import objects
+from staffeln.common import constants, email
+from staffeln.common import time as xtime

 CONF = staffeln.conf.CONF
 LOG = log.getLogger(__name__)
@@ -40,23 +39,23 @@ def send_result_email(self, project_id, subject=None, project_name=None):
             receiver = CONF.notification.receiver
         elif not CONF.notification.project_receiver_domain:
             try:
-                receiver = (
-                    self.backup_mgt.openstacksdk.get_project_member_emails(
-                        project_id
-                    )
+                receiver = self.backup_mgt.openstacksdk.get_project_member_emails(
+                    project_id
                 )
                 if not receiver:
                     LOG.warn(
                         "No email can be found from members of project "
                         f"{project_id}. "
-                        "Skip report now and will try to report later.")
+                        "Skip report now and will try to report later."
+                    )
                     return False
             except Exception as ex:
                 LOG.warn(
                     "Failed to fetch emails from project members with "
                     f"exception: {str(ex)} "
                     "As also no receiver email or project receiver domain are "
-                    "configured. Will try to report later.")
+                    "configured. Will try to report later."
+                )
                 return False
         else:
             receiver_domain = CONF.notification.project_receiver_domain
@@ -124,21 +123,31 @@ def publish(self, project_id=None, project_name=None):
         if success_tasks:
             success_volumes = "<br>".join(
                 [
-                    (f"Volume ID: {str(e.volume_id)}, "
-                     f"Backup ID: {str(e.backup_id)}, "
-                     "Backup mode: "
-                     f"{'Incremental' if e.incremental else 'Full'}, "
-                     f"Created at: {str(e.created_at)}, Last updated at: "
-                     f"{str(e.updated_at)}") for e in success_tasks])
+                    (
+                        f"Volume ID: {str(e.volume_id)}, "
+                        f"Backup ID: {str(e.backup_id)}, "
+                        "Backup mode: "
+                        f"{'Incremental' if e.incremental else 'Full'}, "
+                        f"Created at: {str(e.created_at)}, Last updated at: "
+                        f"{str(e.updated_at)}"
+                    )
+                    for e in success_tasks
+                ]
+            )
         else:
             success_volumes = "<br>"
         if failed_tasks:
             failed_volumes = "<br>".join(
                 [
-                    (f"Volume ID: {str(e.volume_id)}, "
-                     f"Reason: {str(e.reason)}, "
-                     f"Created at: {str(e.created_at)}, Last updated at: "
-                     f"{str(e.updated_at)}") for e in failed_tasks])
+                    (
+                        f"Volume ID: {str(e.volume_id)}, "
+                        f"Reason: {str(e.reason)}, "
+                        f"Created at: {str(e.created_at)}, Last updated at: "
+                        f"{str(e.updated_at)}"
+                    )
+                    for e in failed_tasks
+                ]
+            )
         else:
             failed_volumes = "<br>"
         html += (
@@ -152,7 +161,8 @@ def publish(self, project_id=None, project_name=None):
             "<br><h4>Success List</h4><br>"
             f"<FONT COLOR=GREEN><h4>{success_volumes}</h4></FONT><br>"
             "<br><h4>Failed List</h4><br>"
-            f"<FONT COLOR=RED><h4>{failed_volumes}</h4></FONT><br>")
+            f"<FONT COLOR=RED><h4>{failed_volumes}</h4></FONT><br>"
+            )
         self.content += html
         subject = f"Staffeln Backup result: {project_id}"
         reported = self.send_result_email(
diff --git a/staffeln/conf/__init__.py b/staffeln/conf/__init__.py
index 4da72a5..76c247e 100755
--- a/staffeln/conf/__init__.py
+++ b/staffeln/conf/__init__.py
@@ -2,11 +2,7 @@

 from oslo_config import cfg

-from staffeln.conf import api
-from staffeln.conf import conductor
-from staffeln.conf import database
-from staffeln.conf import notify
-from staffeln.conf import paths
+from staffeln.conf import api, conductor, database, notify, paths

 CONF = cfg.CONF

diff --git a/staffeln/conf/api.py b/staffeln/conf/api.py
index 16db057..4f848eb 100755
--- a/staffeln/conf/api.py
+++ b/staffeln/conf/api.py
@@ -19,9 +19,7 @@
     cfg.PortOpt(
         "port",
         default=8808,
-        help=_(
-            "Staffeln API listens on this port number for incoming requests."
-        ),
+        help=_("Staffeln API listens on this port number for incoming requests."),
     ),
     cfg.BoolOpt("enabled_ssl", default=False, help=_("ssl enabled")),
     cfg.StrOpt("ssl_key_file", default=False, help=_("ssl key file path")),
diff --git a/staffeln/conf/conductor.py b/staffeln/conf/conductor.py
index db0f840..86407cc 100755
--- a/staffeln/conf/conductor.py
+++ b/staffeln/conf/conductor.py
@@ -8,10 +8,7 @@
 conductor_group = cfg.OptGroup(
     "conductor",
     title="Conductor Options",
-    help=_(
-        "Options under this group are used "
-        "to define Conductor's configuration."
-    ),
+    help=_("Options under this group are used " "to define Conductor's configuration."),
 )

 backup_opts = [
@@ -60,9 +57,7 @@
     ),
     cfg.StrOpt(
         "backup_metadata_key",
-        help=_(
-            "The key string of metadata the VM, which requres back up, has"
-        ),
+        help=_("The key string of metadata the VM, which requres back up, has"),
     ),
     cfg.StrOpt(
         "retention_metadata_key",
@@ -122,8 +117,7 @@
     "coordination",
     title="Coordination Options",
     help=_(
-        "Options under this group are used to define Coordination's"
-        "configuration."
+        "Options under this group are used to define Coordination's" "configuration."
     ),
 )

diff --git a/staffeln/conf/database.py b/staffeln/conf/database.py
index f4fe98e..aa65873 100644
--- a/staffeln/conf/database.py
+++ b/staffeln/conf/database.py
@@ -17,9 +17,7 @@
 )

 SQL_OPTS = [
-    cfg.StrOpt(
-        "mysql_engine", default="InnoDB", help=_("MySQL engine to use.")
-    ),
+    cfg.StrOpt("mysql_engine", default="InnoDB", help=_("MySQL engine to use.")),
 ]


diff --git a/staffeln/conf/notify.py b/staffeln/conf/notify.py
index bc3f4bf..c0834b1 100644
--- a/staffeln/conf/notify.py
+++ b/staffeln/conf/notify.py
@@ -7,9 +7,7 @@
 notify_group = cfg.OptGroup(
     "notification",
     title="Notification options",
-    help=_(
-        "Options under this group are used to define notification settings."
-    ),
+    help=_("Options under this group are used to define notification settings."),
 )

 email_opts = [
diff --git a/staffeln/conf/paths.py b/staffeln/conf/paths.py
index 7341e48..08cf205 100644
--- a/staffeln/conf/paths.py
+++ b/staffeln/conf/paths.py
@@ -9,9 +9,7 @@
 PATH_OPTS = [
     cfg.StrOpt(
         "pybasedir",
-        default=os.path.abspath(
-            os.path.join(os.path.dirname(__file__), "../")
-        ),
+        default=os.path.abspath(os.path.join(os.path.dirname(__file__), "../")),
         help=_("Directory where the staffeln python module is installed."),
     ),
     cfg.StrOpt(
diff --git a/staffeln/db/api.py b/staffeln/db/api.py
index 3e22bde..5f6d2f8 100644
--- a/staffeln/db/api.py
+++ b/staffeln/db/api.py
@@ -6,9 +6,7 @@
 from oslo_db import api as db_api

 _BACKEND_MAPPING = {"sqlalchemy": "staffeln.db.sqlalchemy.api"}
-IMPL = db_api.DBAPI.from_config(
-    cfg.CONF, backend_mapping=_BACKEND_MAPPING, lazy=True
-)
+IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING, lazy=True)


 def get_instance():
diff --git a/staffeln/db/sqlalchemy/alembic/env.py b/staffeln/db/sqlalchemy/alembic/env.py
index 970dcc7..18b6ee4 100644
--- a/staffeln/db/sqlalchemy/alembic/env.py
+++ b/staffeln/db/sqlalchemy/alembic/env.py
@@ -46,9 +46,7 @@ def run_migrations_online():
     """
     engine = sqla_api.get_engine()
     with engine.connect() as connection:
-        context.configure(
-            connection=connection, target_metadata=target_metadata
-        )
+        context.configure(connection=connection, target_metadata=target_metadata)
         with context.begin_transaction():
             context.run_migrations()

diff --git a/staffeln/db/sqlalchemy/alembic/versions/041d9a0f1159_backup_add_names.py b/staffeln/db/sqlalchemy/alembic/versions/041d9a0f1159_backup_add_names.py
index 6d53f0e..492009c 100644
--- a/staffeln/db/sqlalchemy/alembic/versions/041d9a0f1159_backup_add_names.py
+++ b/staffeln/db/sqlalchemy/alembic/versions/041d9a0f1159_backup_add_names.py
@@ -12,8 +12,8 @@
 revision = "041d9a0f1159"
 down_revision = ""

-from alembic import op  # noqa: E402
 import sqlalchemy as sa  # noqa: E402
+from alembic import op  # noqa: E402


 def upgrade():
diff --git a/staffeln/db/sqlalchemy/alembic/versions/2b2b9df199bd_add_reason_column_to_queue_data_table.py b/staffeln/db/sqlalchemy/alembic/versions/2b2b9df199bd_add_reason_column_to_queue_data_table.py
index 4ebaf9f..5f87464 100644
--- a/staffeln/db/sqlalchemy/alembic/versions/2b2b9df199bd_add_reason_column_to_queue_data_table.py
+++ b/staffeln/db/sqlalchemy/alembic/versions/2b2b9df199bd_add_reason_column_to_queue_data_table.py
@@ -12,8 +12,8 @@
 revision = "2b2b9df199bd"
 down_revision = "ebdbed01e9a7"

-from alembic import op  # noqa: E402
 import sqlalchemy as sa  # noqa: E402
+from alembic import op  # noqa: E402


 def upgrade():
diff --git a/staffeln/db/sqlalchemy/alembic/versions/5b2e78435231_add_report_timestamp.py b/staffeln/db/sqlalchemy/alembic/versions/5b2e78435231_add_report_timestamp.py
index 5635fd9..20605ee 100644
--- a/staffeln/db/sqlalchemy/alembic/versions/5b2e78435231_add_report_timestamp.py
+++ b/staffeln/db/sqlalchemy/alembic/versions/5b2e78435231_add_report_timestamp.py
@@ -1,8 +1,8 @@
 from __future__ import annotations

+import sqlalchemy as sa
 from alembic import op
 from oslo_log import log
-import sqlalchemy as sa

 """add report timestamp

diff --git a/staffeln/db/sqlalchemy/alembic/versions/ebdbed01e9a7_added_incremental_field.py b/staffeln/db/sqlalchemy/alembic/versions/ebdbed01e9a7_added_incremental_field.py
index 45cc8a8..8dccd8b 100644
--- a/staffeln/db/sqlalchemy/alembic/versions/ebdbed01e9a7_added_incremental_field.py
+++ b/staffeln/db/sqlalchemy/alembic/versions/ebdbed01e9a7_added_incremental_field.py
@@ -12,16 +12,12 @@
 revision = "ebdbed01e9a7"
 down_revision = "041d9a0f1159"

-from alembic import op  # noqa: E402
 import sqlalchemy as sa  # noqa: E402
+from alembic import op  # noqa: E402


 def upgrade():
     # ### commands auto generated by Alembic - please adjust! ###
-    op.add_column(
-        "backup_data", sa.Column("incremental", sa.Boolean(), nullable=True)
-    )
-    op.add_column(
-        "queue_data", sa.Column("incremental", sa.Boolean(), nullable=True)
-    )
+    op.add_column("backup_data", sa.Column("incremental", sa.Boolean(), nullable=True))
+    op.add_column("queue_data", sa.Column("incremental", sa.Boolean(), nullable=True))
     # ### end Alembic commands ###
diff --git a/staffeln/db/sqlalchemy/api.py b/staffeln/db/sqlalchemy/api.py
index 4919f28..b27d0d1 100644
--- a/staffeln/db/sqlalchemy/api.py
+++ b/staffeln/db/sqlalchemy/api.py
@@ -10,9 +10,7 @@
 from oslo_db.sqlalchemy import session as db_session
 from oslo_db.sqlalchemy import utils as db_utils
 from oslo_log import log
-from oslo_utils import strutils
-from oslo_utils import timeutils
-from oslo_utils import uuidutils
+from oslo_utils import strutils, timeutils, uuidutils
 from sqlalchemy.inspection import inspect
 from sqlalchemy.orm import exc

@@ -185,8 +183,9 @@ def __add_simple_filter(self, query, model, fieldname, value, operator_):
         field = getattr(model, fieldname)

         if (
-            fieldname != "deleted" and value and (
-                field.type.python_type is datetime.datetime)
+            fieldname != "deleted"
+            and value
+            and (field.type.python_type is datetime.datetime)
         ):
             if not isinstance(value, datetime.datetime):
                 value = timeutils.parse_isotime(value)
@@ -347,9 +346,7 @@ def get_backup_by_backup_id(self, context, backup_id):

         """Get the column from the backup_data with matching backup_id"""

         try:
-            return self._get_backup(
-                context, fieldname="backup_id", value=backup_id
-            )
+            return self._get_backup(context, fieldname="backup_id", value=backup_id)
         except Exception:  # noqa: E722
             LOG.error("Backup not found with backup_id %s." % backup_id)
@@ -379,9 +376,7 @@ def get_report_timestamp_list(self, *args, **kwargs):

     def create_report_timestamp(self, values):
         try:
-            report_timestamp_data = self._create(
-                models.Report_timestamp, values
-            )
+            report_timestamp_data = self._create(models.Report_timestamp, values)
         except db_exc.DBDuplicateEntry:
             LOG.error("Report Timestamp ID already exists.")
         return report_timestamp_data
diff --git a/staffeln/exception.py b/staffeln/exception.py
index 3f8a34e..e1caceb 100644
--- a/staffeln/exception.py
+++ b/staffeln/exception.py
@@ -16,8 +16,7 @@
 """Staffeln base exception handling."""
 from __future__ import annotations

-from typing import Optional
-from typing import Union
+from typing import Optional, Union

 from oslo_log import log as logging

diff --git a/staffeln/objects/base.py b/staffeln/objects/base.py
index 60a6fe2..7f3cb85 100755
--- a/staffeln/objects/base.py
+++ b/staffeln/objects/base.py
@@ -33,9 +33,7 @@ class StaffelnObject(ovoo_base.VersionedObject):
     OBJ_PROJECT_NAMESPACE = "staffeln"

     def as_dict(self):
-        return {
-            k: getattr(self, k) for k in self.fields if self.obj_attr_is_set(k)
-        }
+        return {k: getattr(self, k) for k in self.fields if self.obj_attr_is_set(k)}


 class StaffelnObjectSerializer(ovoo_base.VersionedObjectSerializer):
@@ -53,14 +51,9 @@ class StaffelnPersistentObject(ovoo_base.VersionedObject):
     object_fields = {}

     def obj_refresh(self, loaded_object):
-        fields = (
-            field for field in self.fields if field not in self.object_fields
-        )
+        fields = (field for field in self.fields if field not in self.object_fields)
         for field in fields:
-            if (
-                self.obj_attr_is_set(field) and (
-                    self[field] != loaded_object[field])
-            ):
+            if self.obj_attr_is_set(field) and (self[field] != loaded_object[field]):
                 self[field] = loaded_object[field]

     @staticmethod
diff --git a/staffeln/objects/report.py b/staffeln/objects/report.py
index 7bc1885..588be73 100644
--- a/staffeln/objects/report.py
+++ b/staffeln/objects/report.py
@@ -26,9 +26,7 @@ class ReportTimestamp(

     @base.remotable_classmethod
     def list(cls, context, filters=None):  # pylint: disable=E0213
-        db_report = cls.dbapi.get_report_timestamp_list(
-            context, filters=filters
-        )
+        db_report = cls.dbapi.get_report_timestamp_list(context, filters=filters)
         return [cls._from_db_object(cls(context), obj) for obj in db_report]

     @base.remotable
diff --git a/staffeln/objects/volume.py b/staffeln/objects/volume.py
index 88f952a..f8b6e80 100644
--- a/staffeln/objects/volume.py
+++ b/staffeln/objects/volume.py
@@ -36,9 +36,7 @@ def list(cls, context, filters=None, **kwargs):  # pylint: disable=E0213

         :param filters: dict mapping the filter to a value.
         """
-        db_backups = cls.dbapi.get_backup_list(
-            context, filters=filters, **kwargs
-        )
+        db_backups = cls.dbapi.get_backup_list(context, filters=filters, **kwargs)

         return [cls._from_db_object(cls(context), obj) for obj in db_backups]

@@ -79,9 +77,7 @@ def delete_backup(self):
         self.dbapi.soft_delete_backup(self.id)

     @base.remotable_classmethod
-    def get_backup_by_backup_id(
-        cls, context, backup_id
-    ):  # pylint: disable=E0213
+    def get_backup_by_backup_id(cls, context, backup_id):  # pylint: disable=E0213
         """Find a backup based on backup_id

         :param context: Security context. NOTE: This should only
diff --git a/staffeln/tests/common/test_openstacksdk.py b/staffeln/tests/common/test_openstacksdk.py
index e516a30..ceeece9 100644
--- a/staffeln/tests/common/test_openstacksdk.py
+++ b/staffeln/tests/common/test_openstacksdk.py
@@ -3,11 +3,11 @@

 from unittest import mock

-from openstack import exceptions as openstack_exc
 import tenacity
+from openstack import exceptions as openstack_exc

-from staffeln.common import openstack as s_openstack
 from staffeln import conf
+from staffeln.common import openstack as s_openstack
 from staffeln.tests import base


@@ -46,20 +46,17 @@ def setUp(self):
         self.fake_role_assignment = mock.MagicMock(user="foo")
         self.fake_role_assignment2 = mock.MagicMock(user={"id": "bar"})

-    def _test_http_error(
-        self, m_func, retry_func, status_code, call_count=1, **kwargs
-    ):
-        m_func.side_effect = openstack_exc.HttpException(
-            http_status=status_code
-        )
+    def _test_http_error(self, m_func, retry_func, status_code, call_count=1, **kwargs):
+        m_func.side_effect = openstack_exc.HttpException(http_status=status_code)
         exc = self.assertRaises(
             openstack_exc.HttpException,
             getattr(self.openstack, retry_func),
             **kwargs,
         )
         self.assertEqual(status_code, exc.status_code)
-        skip_retry_codes = conf.CONF.openstack.skip_retry_codes.replace(
-            ' ', '').split(',')
+        skip_retry_codes = conf.CONF.openstack.skip_retry_codes.replace(" ", "").split(
+            ","
+        )
         if str(status_code) not in skip_retry_codes:
             if call_count == 1:
                 self.m_sleep.assert_called_once_with(1.0)
@@ -72,9 +69,7 @@ def _test_http_error(

     def _test_non_http_error(self, m_func, retry_func, **kwargs):
         m_func.side_effect = KeyError
-        self.assertRaises(
-            KeyError, getattr(self.openstack, retry_func), **kwargs
-        )
+        self.assertRaises(KeyError, getattr(self.openstack, retry_func), **kwargs)
         self.m_sleep.assert_not_called()

     def test_get_servers(self):
@@ -88,28 +83,20 @@ def test_get_servers_non_http_error(self):
         self._test_non_http_error(self.m_c.compute.servers, "get_servers")

     def test_get_servers_conf_skip_http_error(self):
-        conf.CONF.set_override('skip_retry_codes', '403,', 'openstack')
-        self._test_http_error(
-            self.m_c.compute.servers, "get_servers", status_code=403
-        )
-        self.assertEqual('403,', conf.CONF.openstack.skip_retry_codes)
+        conf.CONF.set_override("skip_retry_codes", "403,", "openstack")
+        self._test_http_error(self.m_c.compute.servers, "get_servers", status_code=403)
+        self.assertEqual("403,", conf.CONF.openstack.skip_retry_codes)

     def test_get_servers_conf_skip_http_error_not_hit(self):
-        conf.CONF.set_override('skip_retry_codes', '403,', 'openstack')
-        self._test_http_error(
-            self.m_c.compute.servers, "get_servers", status_code=404
-        )
-        self.assertEqual('403,', conf.CONF.openstack.skip_retry_codes)
+        conf.CONF.set_override("skip_retry_codes", "403,", "openstack")
+        self._test_http_error(self.m_c.compute.servers, "get_servers", status_code=404)
+        self.assertEqual("403,", conf.CONF.openstack.skip_retry_codes)

     def test_get_servers_404_http_error(self):
-        self._test_http_error(
-            self.m_c.compute.servers, "get_servers", status_code=404
-        )
+        self._test_http_error(self.m_c.compute.servers, "get_servers", status_code=404)

     def test_get_servers_500_http_error(self):
-        self._test_http_error(
-            self.m_c.compute.servers, "get_servers", status_code=500
-        )
+        self._test_http_error(self.m_c.compute.servers, "get_servers", status_code=500)

     def test_get_projects(self):
         self.m_c.list_projects = mock.MagicMock(return_value=[])
@@ -120,14 +107,10 @@ def test_get_projects_non_http_error(self):
self._test_non_http_error(self.m_c.list_projects, "get_projects") def test_get_projects_404_http_error(self): - self._test_http_error( - self.m_c.list_projects, "get_projects", status_code=404 - ) + self._test_http_error(self.m_c.list_projects, "get_projects", status_code=404) def test_get_projects_500_http_error(self): - self._test_http_error( - self.m_c.list_projects, "get_projects", status_code=500 - ) + self._test_http_error(self.m_c.list_projects, "get_projects", status_code=500) def test_get_user_id(self): self.m_c.get_user = mock.MagicMock(return_value=self.fake_user) @@ -138,14 +121,10 @@ def test_get_user_id_non_http_error(self): self._test_non_http_error(self.m_c.get_user, "get_user_id") def test_get_user_id_404_http_error(self): - self._test_http_error( - self.m_c.get_user, "get_user_id", status_code=404 - ) + self._test_http_error(self.m_c.get_user, "get_user_id", status_code=404) def test_get_user_id_500_http_error(self): - self._test_http_error( - self.m_c.get_user, "get_user_id", status_code=500 - ) + self._test_http_error(self.m_c.get_user, "get_user_id", status_code=500) def test_get_user(self): self.m_c.get_user = mock.MagicMock(return_value=self.fake_user) @@ -177,9 +156,7 @@ def test_get_user_500_http_error(self): def test_get_role_assignments(self): self.m_c.list_role_assignments = mock.MagicMock(return_value=[]) - self.assertEqual( - self.openstack.get_role_assignments(project_id="foo"), [] - ) + self.assertEqual(self.openstack.get_role_assignments(project_id="foo"), []) self.m_c.list_role_assignments.assert_called_once_with( filters={"project": "foo"} ) @@ -226,9 +203,7 @@ def test_get_project_member_emails(self): self.m_c.get_user.assert_has_calls( [ mock.call(name_or_id=self.fake_role_assignment.user), - mock.call( - name_or_id=self.fake_role_assignment2.user.get("id") - ), + mock.call(name_or_id=self.fake_role_assignment2.user.get("id")), ] ) @@ -257,13 +232,9 @@ def test_get_project_member_emails_500_http_error(self): ) def test_get_volume(self): - self.m_c.get_volume_by_id = mock.MagicMock( - return_value=self.fake_volume - ) + self.m_c.get_volume_by_id = mock.MagicMock(return_value=self.fake_volume) self.assertEqual( - self.openstack.get_volume( - uuid=self.fake_volume.id, project_id="bar" - ), + self.openstack.get_volume(uuid=self.fake_volume.id, project_id="bar"), self.fake_volume, ) self.m_c.get_volume_by_id.assert_called_once_with(self.fake_volume.id) @@ -295,13 +266,9 @@ def test_get_volume_500_http_error(self): ) def test_get_backup(self): - self.m_c.get_volume_backup = mock.MagicMock( - return_value=self.fake_backup - ) + self.m_c.get_volume_backup = mock.MagicMock(return_value=self.fake_backup) self.assertEqual( - self.openstack.get_backup( - uuid=self.fake_backup.id, project_id="bar" - ), + self.openstack.get_backup(uuid=self.fake_backup.id, project_id="bar"), self.fake_backup, ) self.m_c.get_volume_backup.assert_called_once_with(self.fake_backup.id) @@ -311,9 +278,7 @@ def test_get_backup_not_found(self): side_effect=openstack_exc.ResourceNotFound ) self.assertEqual( - self.openstack.get_backup( - uuid=self.fake_backup.id, project_id="bar" - ), + self.openstack.get_backup(uuid=self.fake_backup.id, project_id="bar"), None, ) self.m_c.get_volume_backup.assert_called_once_with(self.fake_backup.id) @@ -345,13 +310,9 @@ def test_get_backup_500_http_error(self): ) def test_delete_backup(self): - self.m_c.delete_volume_backup = mock.MagicMock( - return_value=self.fake_backup - ) + self.m_c.delete_volume_backup = mock.MagicMock(return_value=self.fake_backup) 
self.assertEqual( - self.openstack.delete_backup( - uuid=self.fake_backup.id, project_id="bar" - ), + self.openstack.delete_backup(uuid=self.fake_backup.id, project_id="bar"), None, ) self.m_c.delete_volume_backup.assert_called_once_with( @@ -363,9 +324,7 @@ def test_delete_backup_not_found(self): side_effect=openstack_exc.ResourceNotFound ) self.assertEqual( - self.openstack.delete_backup( - uuid=self.fake_backup.id, project_id="bar" - ), + self.openstack.delete_backup(uuid=self.fake_backup.id, project_id="bar"), None, ) self.m_c.delete_volume_backup.assert_called_once_with( @@ -438,9 +397,7 @@ def test_get_backup_gigabytes_quota(self, m_j_r): self.m_c.block_storage.get = mock.MagicMock(status_code=200) self.m_gam = mock.MagicMock() self.m_c._get_and_munchify = self.m_gam - self.m_gam.return_value = mock.MagicMock( - backup_gigabytes=[self.fake_backup.id] - ) + self.m_gam.return_value = mock.MagicMock(backup_gigabytes=[self.fake_backup.id]) self.assertEqual( [self.fake_backup.id], self.openstack.get_backup_gigabytes_quota(project_id="bar"), @@ -497,7 +454,5 @@ def test_get_volume_quotas_no_usage(self, m_j_r): self.m_gam_return, self.openstack._get_volume_quotas(project_id="bar", usage=False), ) - self.m_c.block_storage.get.assert_called_once_with( - "/os-quota-sets/bar" - ) + self.m_c.block_storage.get.assert_called_once_with("/os-quota-sets/bar") self.m_gam.assert_called_once_with("quota_set", m_j_r()) From 0a1c2d05b06ded60fdec2e43981fd8b8738d1d8e Mon Sep 17 00:00:00 2001 From: ricolin Date: Wed, 11 Dec 2024 11:19:23 +0800 Subject: [PATCH 8/8] add gate jobs --- zuul.d/project.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml index 065d68e..a62642a 100644 --- a/zuul.d/project.yaml +++ b/zuul.d/project.yaml @@ -3,3 +3,7 @@ jobs: - staffeln-linters - staffeln-unit + gate: + jobs: + - staffeln-linters + - staffeln-unit